From 80f72e687cfb3ffcee1547d4dc67ec4b59dad871 Mon Sep 17 00:00:00 2001
From: "K. S. Ernest (iFire) Lee"
Date: Thu, 9 May 2024 05:09:25 -0700
Subject: [PATCH] Add CSG boolean operators using elalish/manifold.
---
 modules/csg/SCsub | 59 + modules/csg/csg.cpp | 1547 ---- modules/csg/csg.h | 204 - modules/csg/csg_shape.cpp | 320 +- modules/csg/csg_shape.h | 3 +- thirdparty/manifold/.gitrepo | 12 + thirdparty/manifold/AUTHORS | 10 + thirdparty/manifold/LICENSE | 201 + .../manifold/src/collider/include/collider.h | 46 + .../manifold/src/collider/src/collider.cpp | 395 + .../src/cross_section/include/cross_section.h | 176 + .../src/cross_section/src/cross_section.cpp | 795 ++ .../manifold/src/manifold/include/manifold.h | 311 + .../manifold/src/manifold/src/boolean3.cpp | 602 ++ .../manifold/src/manifold/src/boolean3.h | 60 + .../src/manifold/src/boolean_result.cpp | 831 ++ .../src/manifold/src/constructors.cpp | 518 ++ .../manifold/src/manifold/src/csg_tree.cpp | 651 ++ .../manifold/src/manifold/src/csg_tree.h | 109 + .../manifold/src/manifold/src/edge_op.cpp | 678 ++ .../manifold/src/manifold/src/face_op.cpp | 321 + thirdparty/manifold/src/manifold/src/impl.cpp | 956 +++ thirdparty/manifold/src/manifold/src/impl.h | 190 + .../manifold/src/manifold/src/manifold.cpp | 970 +++ .../manifold/src/manifold/src/mesh_fixes.h | 57 + .../manifold/src/manifold/src/properties.cpp | 418 + thirdparty/manifold/src/manifold/src/shared.h | 232 + .../manifold/src/manifold/src/smoothing.cpp | 847 +++ thirdparty/manifold/src/manifold/src/sort.cpp | 616 ++ .../manifold/src/manifold/src/subdivision.cpp | 792 ++ .../manifold/src/polygon/include/polygon.h | 51 + .../manifold/src/polygon/src/polygon.cpp | 877 +++ thirdparty/manifold/src/sdf/include/sdf.h | 24 + thirdparty/manifold/src/sdf/src/sdf.cpp | 362 + .../src/third_party/quickhull/.gitignore | 1 + .../src/third_party/quickhull/.gitrepo | 12 + .../src/third_party/quickhull/ConvexHull.hpp | 182 + .../third_party/quickhull/HalfEdgeMesh.hpp | 77 + .../src/third_party/quickhull/MathUtils.hpp | 46 + .../src/third_party/quickhull/QuickHull.cpp | 503 ++ .../src/third_party/quickhull/QuickHull.hpp | 223 + .../src/third_party/quickhull/README.md | 31 + .../third_party/quickhull/Structs/Mesh.hpp | 255 + .../third_party/quickhull/Structs/Plane.hpp | 36 + .../third_party/quickhull/Structs/Pool.hpp | 35 + .../src/third_party/quickhull/Structs/Ray.hpp | 21 + .../third_party/quickhull/Structs/Vector3.hpp | 140 + .../quickhull/Structs/VertexDataSource.hpp | 48 + .../src/utilities/include/hashtable.h | 174 + .../src/utilities/include/optional_assert.h | 38 + .../manifold/src/utilities/include/par.h | 195 + .../manifold/src/utilities/include/public.h | 696 ++ .../manifold/src/utilities/include/sparse.h | 201 + .../manifold/src/utilities/include/svd.h | 310 + .../manifold/src/utilities/include/tri_dist.h | 225 + .../manifold/src/utilities/include/utils.h | 242 + .../manifold/src/utilities/include/vec.h | 244 + .../manifold/src/utilities/include/vec_view.h | 116 + thirdparty/manifold/thirdparty/glm/.gitrepo | 12 + .../manifold/thirdparty/glm/copying.txt | 54 + .../thirdparty/glm/glm/CMakeLists.txt | 69 + .../manifold/thirdparty/glm/glm/common.hpp | 539 ++ .../thirdparty/glm/glm/detail/_features.hpp | 394 + .../thirdparty/glm/glm/detail/_fixes.hpp | 27 + .../thirdparty/glm/glm/detail/_noise.hpp | 81 + .../thirdparty/glm/glm/detail/_swizzle.hpp | 804 ++ .../glm/glm/detail/_swizzle_func.hpp | 682 ++ .../thirdparty/glm/glm/detail/_vectorize.hpp | 162 + 
.../glm/glm/detail/compute_common.hpp | 50 + .../glm/detail/compute_vector_relational.hpp | 30 + .../thirdparty/glm/glm/detail/func_common.inl | 792 ++ .../glm/glm/detail/func_common_simd.inl | 231 + .../glm/glm/detail/func_exponential.inl | 152 + .../glm/glm/detail/func_exponential_simd.inl | 37 + .../glm/glm/detail/func_geometric.inl | 243 + .../glm/glm/detail/func_geometric_simd.inl | 163 + .../glm/glm/detail/func_integer.inl | 369 + .../glm/glm/detail/func_integer_simd.inl | 65 + .../thirdparty/glm/glm/detail/func_matrix.inl | 443 ++ .../glm/glm/detail/func_matrix_simd.inl | 252 + .../glm/glm/detail/func_packing.inl | 189 + .../glm/glm/detail/func_packing_simd.inl | 6 + .../glm/glm/detail/func_trigonometric.inl | 197 + .../glm/detail/func_trigonometric_simd.inl | 0 .../glm/glm/detail/func_vector_relational.inl | 87 + .../detail/func_vector_relational_simd.inl | 6 + .../thirdparty/glm/glm/detail/glm.cpp | 263 + .../thirdparty/glm/glm/detail/qualifier.hpp | 229 + .../thirdparty/glm/glm/detail/setup.hpp | 1167 +++ .../thirdparty/glm/glm/detail/type_float.hpp | 68 + .../thirdparty/glm/glm/detail/type_half.hpp | 16 + .../thirdparty/glm/glm/detail/type_half.inl | 241 + .../thirdparty/glm/glm/detail/type_mat2x2.hpp | 177 + .../thirdparty/glm/glm/detail/type_mat2x2.inl | 536 ++ .../thirdparty/glm/glm/detail/type_mat2x3.hpp | 159 + .../thirdparty/glm/glm/detail/type_mat2x3.inl | 510 ++ .../thirdparty/glm/glm/detail/type_mat2x4.hpp | 161 + .../thirdparty/glm/glm/detail/type_mat2x4.inl | 520 ++ .../thirdparty/glm/glm/detail/type_mat3x2.hpp | 167 + .../thirdparty/glm/glm/detail/type_mat3x2.inl | 532 ++ .../thirdparty/glm/glm/detail/type_mat3x3.hpp | 184 + .../thirdparty/glm/glm/detail/type_mat3x3.inl | 601 ++ .../thirdparty/glm/glm/detail/type_mat3x4.hpp | 166 + .../thirdparty/glm/glm/detail/type_mat3x4.inl | 578 ++ .../thirdparty/glm/glm/detail/type_mat4x2.hpp | 171 + .../thirdparty/glm/glm/detail/type_mat4x2.inl | 574 ++ .../thirdparty/glm/glm/detail/type_mat4x3.hpp | 171 + .../thirdparty/glm/glm/detail/type_mat4x3.inl | 598 ++ .../thirdparty/glm/glm/detail/type_mat4x4.hpp | 189 + .../thirdparty/glm/glm/detail/type_mat4x4.inl | 706 ++ .../glm/glm/detail/type_mat4x4_simd.inl | 6 + .../thirdparty/glm/glm/detail/type_quat.hpp | 193 + .../thirdparty/glm/glm/detail/type_quat.inl | 424 ++ .../glm/glm/detail/type_quat_simd.inl | 208 + .../thirdparty/glm/glm/detail/type_vec1.hpp | 308 + .../thirdparty/glm/glm/detail/type_vec1.inl | 553 ++ .../thirdparty/glm/glm/detail/type_vec2.hpp | 402 + .../thirdparty/glm/glm/detail/type_vec2.inl | 915 +++ .../thirdparty/glm/glm/detail/type_vec3.hpp | 435 ++ .../thirdparty/glm/glm/detail/type_vec3.inl | 1070 +++ .../thirdparty/glm/glm/detail/type_vec4.hpp | 508 ++ .../thirdparty/glm/glm/detail/type_vec4.inl | 1142 +++ .../glm/glm/detail/type_vec4_simd.inl | 788 ++ .../thirdparty/glm/glm/exponential.hpp | 110 + .../manifold/thirdparty/glm/glm/ext.hpp | 266 + .../glm/glm/ext/_matrix_vectorize.hpp | 128 + .../glm/glm/ext/matrix_clip_space.hpp | 522 ++ .../glm/glm/ext/matrix_clip_space.inl | 595 ++ .../thirdparty/glm/glm/ext/matrix_common.hpp | 39 + .../thirdparty/glm/glm/ext/matrix_common.inl | 34 + .../glm/glm/ext/matrix_double2x2.hpp | 23 + .../glm/ext/matrix_double2x2_precision.hpp | 49 + .../glm/glm/ext/matrix_double2x3.hpp | 18 + .../glm/ext/matrix_double2x3_precision.hpp | 31 + .../glm/glm/ext/matrix_double2x4.hpp | 18 + .../glm/ext/matrix_double2x4_precision.hpp | 31 + .../glm/glm/ext/matrix_double3x2.hpp | 18 + .../glm/ext/matrix_double3x2_precision.hpp | 31 + 
.../glm/glm/ext/matrix_double3x3.hpp | 23 + .../glm/ext/matrix_double3x3_precision.hpp | 49 + .../glm/glm/ext/matrix_double3x4.hpp | 18 + .../glm/ext/matrix_double3x4_precision.hpp | 31 + .../glm/glm/ext/matrix_double4x2.hpp | 18 + .../glm/ext/matrix_double4x2_precision.hpp | 31 + .../glm/glm/ext/matrix_double4x3.hpp | 18 + .../glm/ext/matrix_double4x3_precision.hpp | 31 + .../glm/glm/ext/matrix_double4x4.hpp | 23 + .../glm/ext/matrix_double4x4_precision.hpp | 49 + .../glm/glm/ext/matrix_float2x2.hpp | 23 + .../glm/glm/ext/matrix_float2x2_precision.hpp | 49 + .../glm/glm/ext/matrix_float2x3.hpp | 18 + .../glm/glm/ext/matrix_float2x3_precision.hpp | 31 + .../glm/glm/ext/matrix_float2x4.hpp | 18 + .../glm/glm/ext/matrix_float2x4_precision.hpp | 31 + .../glm/glm/ext/matrix_float3x2.hpp | 18 + .../glm/glm/ext/matrix_float3x2_precision.hpp | 31 + .../glm/glm/ext/matrix_float3x3.hpp | 23 + .../glm/glm/ext/matrix_float3x3_precision.hpp | 49 + .../glm/glm/ext/matrix_float3x4.hpp | 18 + .../glm/glm/ext/matrix_float3x4_precision.hpp | 31 + .../glm/glm/ext/matrix_float4x2.hpp | 18 + .../glm/glm/ext/matrix_float4x2_precision.hpp | 31 + .../glm/glm/ext/matrix_float4x3.hpp | 18 + .../glm/glm/ext/matrix_float4x3_precision.hpp | 31 + .../glm/glm/ext/matrix_float4x4.hpp | 23 + .../glm/glm/ext/matrix_float4x4_precision.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_int2x2.hpp | 38 + .../glm/glm/ext/matrix_int2x2_sized.hpp | 70 + .../thirdparty/glm/glm/ext/matrix_int2x3.hpp | 33 + .../glm/glm/ext/matrix_int2x3_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_int2x4.hpp | 33 + .../glm/glm/ext/matrix_int2x4_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_int3x2.hpp | 33 + .../glm/glm/ext/matrix_int3x2_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_int3x3.hpp | 38 + .../glm/glm/ext/matrix_int3x3_sized.hpp | 70 + .../thirdparty/glm/glm/ext/matrix_int3x4.hpp | 33 + .../glm/glm/ext/matrix_int3x4_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_int4x2.hpp | 33 + .../glm/glm/ext/matrix_int4x2_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_int4x3.hpp | 33 + .../glm/glm/ext/matrix_int4x3_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_int4x4.hpp | 38 + .../glm/glm/ext/matrix_int4x4_sized.hpp | 70 + .../thirdparty/glm/glm/ext/matrix_integer.hpp | 91 + .../thirdparty/glm/glm/ext/matrix_integer.inl | 38 + .../glm/glm/ext/matrix_projection.hpp | 149 + .../glm/glm/ext/matrix_projection.inl | 106 + .../glm/glm/ext/matrix_relational.hpp | 132 + .../glm/glm/ext/matrix_relational.inl | 88 + .../glm/glm/ext/matrix_transform.hpp | 171 + .../glm/glm/ext/matrix_transform.inl | 207 + .../thirdparty/glm/glm/ext/matrix_uint2x2.hpp | 38 + .../glm/glm/ext/matrix_uint2x2_sized.hpp | 70 + .../thirdparty/glm/glm/ext/matrix_uint2x3.hpp | 33 + .../glm/glm/ext/matrix_uint2x3_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_uint2x4.hpp | 33 + .../glm/glm/ext/matrix_uint2x4_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_uint3x2.hpp | 33 + .../glm/glm/ext/matrix_uint3x2_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_uint3x3.hpp | 38 + .../glm/glm/ext/matrix_uint3x3_sized.hpp | 70 + .../thirdparty/glm/glm/ext/matrix_uint3x4.hpp | 33 + .../glm/glm/ext/matrix_uint3x4_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_uint4x2.hpp | 33 + .../glm/glm/ext/matrix_uint4x2_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_uint4x3.hpp | 33 + .../glm/glm/ext/matrix_uint4x3_sized.hpp | 49 + .../thirdparty/glm/glm/ext/matrix_uint4x4.hpp | 38 + .../glm/glm/ext/matrix_uint4x4_sized.hpp | 70 + .../glm/glm/ext/quaternion_common.hpp | 
135 + .../glm/glm/ext/quaternion_common.inl | 144 + .../glm/glm/ext/quaternion_common_simd.inl | 18 + .../glm/glm/ext/quaternion_double.hpp | 39 + .../glm/ext/quaternion_double_precision.hpp | 42 + .../glm/glm/ext/quaternion_exponential.hpp | 63 + .../glm/glm/ext/quaternion_exponential.inl | 89 + .../glm/glm/ext/quaternion_float.hpp | 39 + .../glm/ext/quaternion_float_precision.hpp | 36 + .../glm/glm/ext/quaternion_geometric.hpp | 70 + .../glm/glm/ext/quaternion_geometric.inl | 36 + .../glm/glm/ext/quaternion_relational.hpp | 62 + .../glm/glm/ext/quaternion_relational.inl | 35 + .../glm/glm/ext/quaternion_transform.hpp | 47 + .../glm/glm/ext/quaternion_transform.inl | 24 + .../glm/glm/ext/quaternion_trigonometric.hpp | 65 + .../glm/glm/ext/quaternion_trigonometric.inl | 37 + .../thirdparty/glm/glm/ext/scalar_common.hpp | 181 + .../thirdparty/glm/glm/ext/scalar_common.inl | 170 + .../glm/glm/ext/scalar_constants.hpp | 40 + .../glm/glm/ext/scalar_constants.inl | 24 + .../glm/glm/ext/scalar_int_sized.hpp | 70 + .../thirdparty/glm/glm/ext/scalar_integer.hpp | 92 + .../thirdparty/glm/glm/ext/scalar_integer.inl | 243 + .../thirdparty/glm/glm/ext/scalar_packing.hpp | 32 + .../thirdparty/glm/glm/ext/scalar_packing.inl | 0 .../glm/glm/ext/scalar_reciprocal.hpp | 135 + .../glm/glm/ext/scalar_reciprocal.inl | 107 + .../glm/glm/ext/scalar_relational.hpp | 68 + .../glm/glm/ext/scalar_relational.inl | 40 + .../glm/glm/ext/scalar_uint_sized.hpp | 70 + .../thirdparty/glm/glm/ext/scalar_ulp.hpp | 77 + .../thirdparty/glm/glm/ext/scalar_ulp.inl | 284 + .../thirdparty/glm/glm/ext/vector_bool1.hpp | 30 + .../glm/glm/ext/vector_bool1_precision.hpp | 34 + .../thirdparty/glm/glm/ext/vector_bool2.hpp | 18 + .../glm/glm/ext/vector_bool2_precision.hpp | 31 + .../thirdparty/glm/glm/ext/vector_bool3.hpp | 18 + .../glm/glm/ext/vector_bool3_precision.hpp | 31 + .../thirdparty/glm/glm/ext/vector_bool4.hpp | 18 + .../glm/glm/ext/vector_bool4_precision.hpp | 31 + .../thirdparty/glm/glm/ext/vector_common.hpp | 228 + .../thirdparty/glm/glm/ext/vector_common.inl | 147 + .../thirdparty/glm/glm/ext/vector_double1.hpp | 31 + .../glm/glm/ext/vector_double1_precision.hpp | 36 + .../thirdparty/glm/glm/ext/vector_double2.hpp | 18 + .../glm/glm/ext/vector_double2_precision.hpp | 31 + .../thirdparty/glm/glm/ext/vector_double3.hpp | 18 + .../glm/glm/ext/vector_double3_precision.hpp | 34 + .../thirdparty/glm/glm/ext/vector_double4.hpp | 18 + .../glm/glm/ext/vector_double4_precision.hpp | 35 + .../thirdparty/glm/glm/ext/vector_float1.hpp | 31 + .../glm/glm/ext/vector_float1_precision.hpp | 36 + .../thirdparty/glm/glm/ext/vector_float2.hpp | 18 + .../glm/glm/ext/vector_float2_precision.hpp | 31 + .../thirdparty/glm/glm/ext/vector_float3.hpp | 18 + .../glm/glm/ext/vector_float3_precision.hpp | 31 + .../thirdparty/glm/glm/ext/vector_float4.hpp | 18 + .../glm/glm/ext/vector_float4_precision.hpp | 31 + .../thirdparty/glm/glm/ext/vector_int1.hpp | 32 + .../glm/glm/ext/vector_int1_sized.hpp | 49 + .../thirdparty/glm/glm/ext/vector_int2.hpp | 18 + .../glm/glm/ext/vector_int2_sized.hpp | 49 + .../thirdparty/glm/glm/ext/vector_int3.hpp | 18 + .../glm/glm/ext/vector_int3_sized.hpp | 49 + .../thirdparty/glm/glm/ext/vector_int4.hpp | 18 + .../glm/glm/ext/vector_int4_sized.hpp | 49 + .../thirdparty/glm/glm/ext/vector_integer.hpp | 149 + .../thirdparty/glm/glm/ext/vector_integer.inl | 85 + .../thirdparty/glm/glm/ext/vector_packing.hpp | 32 + .../thirdparty/glm/glm/ext/vector_packing.inl | 0 .../glm/glm/ext/vector_reciprocal.hpp | 135 + 
.../glm/glm/ext/vector_reciprocal.inl | 105 + .../glm/glm/ext/vector_relational.hpp | 107 + .../glm/glm/ext/vector_relational.inl | 75 + .../thirdparty/glm/glm/ext/vector_uint1.hpp | 32 + .../glm/glm/ext/vector_uint1_sized.hpp | 49 + .../thirdparty/glm/glm/ext/vector_uint2.hpp | 18 + .../glm/glm/ext/vector_uint2_sized.hpp | 49 + .../thirdparty/glm/glm/ext/vector_uint3.hpp | 18 + .../glm/glm/ext/vector_uint3_sized.hpp | 49 + .../thirdparty/glm/glm/ext/vector_uint4.hpp | 18 + .../glm/glm/ext/vector_uint4_sized.hpp | 49 + .../thirdparty/glm/glm/ext/vector_ulp.hpp | 112 + .../thirdparty/glm/glm/ext/vector_ulp.inl | 74 + .../manifold/thirdparty/glm/glm/fwd.hpp | 1233 +++ .../manifold/thirdparty/glm/glm/geometric.hpp | 116 + .../manifold/thirdparty/glm/glm/glm.cppm | 2675 +++++++ .../manifold/thirdparty/glm/glm/glm.hpp | 137 + .../thirdparty/glm/glm/gtc/bitfield.hpp | 266 + .../thirdparty/glm/glm/gtc/bitfield.inl | 626 ++ .../thirdparty/glm/glm/gtc/color_space.hpp | 56 + .../thirdparty/glm/glm/gtc/color_space.inl | 84 + .../thirdparty/glm/glm/gtc/constants.hpp | 170 + .../thirdparty/glm/glm/gtc/constants.inl | 173 + .../thirdparty/glm/glm/gtc/epsilon.hpp | 60 + .../thirdparty/glm/glm/gtc/epsilon.inl | 80 + .../thirdparty/glm/glm/gtc/integer.hpp | 43 + .../thirdparty/glm/glm/gtc/integer.inl | 33 + .../thirdparty/glm/glm/gtc/matrix_access.hpp | 60 + .../thirdparty/glm/glm/gtc/matrix_access.inl | 62 + .../thirdparty/glm/glm/gtc/matrix_integer.hpp | 433 ++ .../thirdparty/glm/glm/gtc/matrix_inverse.hpp | 50 + .../thirdparty/glm/glm/gtc/matrix_inverse.inl | 118 + .../glm/glm/gtc/matrix_transform.hpp | 36 + .../glm/glm/gtc/matrix_transform.inl | 3 + .../manifold/thirdparty/glm/glm/gtc/noise.hpp | 61 + .../manifold/thirdparty/glm/glm/gtc/noise.inl | 807 ++ .../thirdparty/glm/glm/gtc/packing.hpp | 728 ++ .../thirdparty/glm/glm/gtc/packing.inl | 938 +++ .../thirdparty/glm/glm/gtc/quaternion.hpp | 173 + .../thirdparty/glm/glm/gtc/quaternion.inl | 208 + .../glm/glm/gtc/quaternion_simd.inl | 0 .../thirdparty/glm/glm/gtc/random.hpp | 82 + .../thirdparty/glm/glm/gtc/random.inl | 303 + .../thirdparty/glm/glm/gtc/reciprocal.hpp | 24 + .../manifold/thirdparty/glm/glm/gtc/round.hpp | 160 + .../manifold/thirdparty/glm/glm/gtc/round.inl | 155 + .../thirdparty/glm/glm/gtc/type_aligned.hpp | 1315 ++++ .../thirdparty/glm/glm/gtc/type_precision.hpp | 2094 +++++ .../thirdparty/glm/glm/gtc/type_precision.inl | 6 + .../thirdparty/glm/glm/gtc/type_ptr.hpp | 230 + .../thirdparty/glm/glm/gtc/type_ptr.inl | 386 + .../manifold/thirdparty/glm/glm/gtc/ulp.hpp | 155 + .../manifold/thirdparty/glm/glm/gtc/ulp.inl | 173 + .../manifold/thirdparty/glm/glm/gtc/vec1.hpp | 30 + .../glm/glm/gtx/associated_min_max.hpp | 207 + .../glm/glm/gtx/associated_min_max.inl | 354 + .../manifold/thirdparty/glm/glm/gtx/bit.hpp | 98 + .../manifold/thirdparty/glm/glm/gtx/bit.inl | 92 + .../thirdparty/glm/glm/gtx/closest_point.hpp | 49 + .../thirdparty/glm/glm/gtx/closest_point.inl | 45 + .../thirdparty/glm/glm/gtx/color_encoding.hpp | 54 + .../thirdparty/glm/glm/gtx/color_encoding.inl | 45 + .../thirdparty/glm/glm/gtx/color_space.hpp | 72 + .../thirdparty/glm/glm/gtx/color_space.inl | 141 + .../glm/glm/gtx/color_space_YCoCg.hpp | 60 + .../glm/glm/gtx/color_space_YCoCg.inl | 107 + .../thirdparty/glm/glm/gtx/common.hpp | 76 + .../thirdparty/glm/glm/gtx/common.inl | 125 + .../thirdparty/glm/glm/gtx/compatibility.hpp | 133 + .../thirdparty/glm/glm/gtx/compatibility.inl | 62 + .../thirdparty/glm/glm/gtx/component_wise.hpp | 69 + 
.../thirdparty/glm/glm/gtx/component_wise.inl | 127 + .../glm/glm/gtx/dual_quaternion.hpp | 274 + .../glm/glm/gtx/dual_quaternion.inl | 352 + .../thirdparty/glm/glm/gtx/easing.hpp | 219 + .../thirdparty/glm/glm/gtx/easing.inl | 436 ++ .../thirdparty/glm/glm/gtx/euler_angles.hpp | 335 + .../thirdparty/glm/glm/gtx/euler_angles.inl | 899 +++ .../thirdparty/glm/glm/gtx/extend.hpp | 42 + .../thirdparty/glm/glm/gtx/extend.inl | 48 + .../glm/glm/gtx/extended_min_max.hpp | 137 + .../glm/glm/gtx/extended_min_max.inl | 138 + .../glm/glm/gtx/exterior_product.hpp | 45 + .../glm/glm/gtx/exterior_product.inl | 26 + .../glm/glm/gtx/fast_exponential.hpp | 95 + .../glm/glm/gtx/fast_exponential.inl | 136 + .../glm/glm/gtx/fast_square_root.hpp | 98 + .../glm/glm/gtx/fast_square_root.inl | 75 + .../glm/glm/gtx/fast_trigonometry.hpp | 79 + .../glm/glm/gtx/fast_trigonometry.inl | 142 + .../glm/glm/gtx/float_notmalize.inl | 13 + .../thirdparty/glm/glm/gtx/functions.hpp | 56 + .../thirdparty/glm/glm/gtx/functions.inl | 30 + .../thirdparty/glm/glm/gtx/gradient_paint.hpp | 53 + .../thirdparty/glm/glm/gtx/gradient_paint.inl | 36 + .../glm/glm/gtx/handed_coordinate_space.hpp | 50 + .../glm/glm/gtx/handed_coordinate_space.inl | 26 + .../manifold/thirdparty/glm/glm/gtx/hash.hpp | 146 + .../manifold/thirdparty/glm/glm/gtx/hash.inl | 175 + .../thirdparty/glm/glm/gtx/integer.hpp | 76 + .../thirdparty/glm/glm/gtx/integer.inl | 185 + .../thirdparty/glm/glm/gtx/intersect.hpp | 92 + .../thirdparty/glm/glm/gtx/intersect.inl | 200 + .../manifold/thirdparty/glm/glm/gtx/io.hpp | 201 + .../manifold/thirdparty/glm/glm/gtx/io.inl | 440 ++ .../thirdparty/glm/glm/gtx/log_base.hpp | 48 + .../thirdparty/glm/glm/gtx/log_base.inl | 16 + .../glm/glm/gtx/matrix_cross_product.hpp | 47 + .../glm/glm/gtx/matrix_cross_product.inl | 37 + .../glm/glm/gtx/matrix_decompose.hpp | 52 + .../glm/glm/gtx/matrix_decompose.inl | 234 + .../glm/glm/gtx/matrix_factorisation.hpp | 69 + .../glm/glm/gtx/matrix_factorisation.inl | 84 + .../glm/glm/gtx/matrix_interpolation.hpp | 60 + .../glm/glm/gtx/matrix_interpolation.inl | 146 + .../glm/glm/gtx/matrix_major_storage.hpp | 119 + .../glm/glm/gtx/matrix_major_storage.inl | 166 + .../glm/glm/gtx/matrix_operation.hpp | 103 + .../glm/glm/gtx/matrix_operation.inl | 176 + .../thirdparty/glm/glm/gtx/matrix_query.hpp | 77 + .../thirdparty/glm/glm/gtx/matrix_query.inl | 119 + .../glm/glm/gtx/matrix_transform_2d.hpp | 81 + .../glm/glm/gtx/matrix_transform_2d.inl | 68 + .../thirdparty/glm/glm/gtx/mixed_product.hpp | 41 + .../thirdparty/glm/glm/gtx/mixed_product.inl | 15 + .../manifold/thirdparty/glm/glm/gtx/norm.hpp | 88 + .../manifold/thirdparty/glm/glm/gtx/norm.inl | 95 + .../thirdparty/glm/glm/gtx/normal.hpp | 41 + .../thirdparty/glm/glm/gtx/normal.inl | 15 + .../thirdparty/glm/glm/gtx/normalize_dot.hpp | 49 + .../thirdparty/glm/glm/gtx/normalize_dot.inl | 16 + .../glm/glm/gtx/number_precision.hpp | 47 + .../thirdparty/glm/glm/gtx/optimum_pow.hpp | 52 + .../thirdparty/glm/glm/gtx/optimum_pow.inl | 22 + .../thirdparty/glm/glm/gtx/orthonormalize.hpp | 49 + .../thirdparty/glm/glm/gtx/orthonormalize.inl | 29 + .../manifold/thirdparty/glm/glm/gtx/pca.hpp | 115 + .../manifold/thirdparty/glm/glm/gtx/pca.inl | 343 + .../thirdparty/glm/glm/gtx/perpendicular.hpp | 41 + .../thirdparty/glm/glm/gtx/perpendicular.inl | 10 + .../glm/glm/gtx/polar_coordinates.hpp | 48 + .../glm/glm/gtx/polar_coordinates.inl | 36 + .../thirdparty/glm/glm/gtx/projection.hpp | 43 + .../thirdparty/glm/glm/gtx/projection.inl | 10 + 
.../thirdparty/glm/glm/gtx/quaternion.hpp | 174 + .../thirdparty/glm/glm/gtx/quaternion.inl | 159 + .../manifold/thirdparty/glm/glm/gtx/range.hpp | 98 + .../thirdparty/glm/glm/gtx/raw_data.hpp | 51 + .../thirdparty/glm/glm/gtx/raw_data.inl | 2 + .../glm/glm/gtx/rotate_normalized_axis.hpp | 68 + .../glm/glm/gtx/rotate_normalized_axis.inl | 58 + .../thirdparty/glm/glm/gtx/rotate_vector.hpp | 123 + .../thirdparty/glm/glm/gtx/rotate_vector.inl | 187 + .../glm/glm/gtx/scalar_multiplication.hpp | 82 + .../glm/glm/gtx/scalar_relational.hpp | 36 + .../glm/glm/gtx/scalar_relational.inl | 88 + .../thirdparty/glm/glm/gtx/spline.hpp | 65 + .../thirdparty/glm/glm/gtx/spline.inl | 60 + .../thirdparty/glm/glm/gtx/std_based_type.hpp | 68 + .../thirdparty/glm/glm/gtx/std_based_type.inl | 6 + .../thirdparty/glm/glm/gtx/string_cast.hpp | 46 + .../thirdparty/glm/glm/gtx/string_cast.inl | 492 ++ .../thirdparty/glm/glm/gtx/texture.hpp | 46 + .../thirdparty/glm/glm/gtx/texture.inl | 17 + .../thirdparty/glm/glm/gtx/transform.hpp | 60 + .../thirdparty/glm/glm/gtx/transform.inl | 23 + .../thirdparty/glm/glm/gtx/transform2.hpp | 89 + .../thirdparty/glm/glm/gtx/transform2.inl | 125 + .../thirdparty/glm/glm/gtx/type_aligned.hpp | 982 +++ .../thirdparty/glm/glm/gtx/type_aligned.inl | 6 + .../thirdparty/glm/glm/gtx/type_trait.hpp | 85 + .../thirdparty/glm/glm/gtx/type_trait.inl | 61 + .../thirdparty/glm/glm/gtx/vec_swizzle.hpp | 2786 +++++++ .../thirdparty/glm/glm/gtx/vector_angle.hpp | 57 + .../thirdparty/glm/glm/gtx/vector_angle.inl | 45 + .../thirdparty/glm/glm/gtx/vector_query.hpp | 66 + .../thirdparty/glm/glm/gtx/vector_query.inl | 154 + .../manifold/thirdparty/glm/glm/gtx/wrap.hpp | 37 + .../manifold/thirdparty/glm/glm/gtx/wrap.inl | 6 + .../manifold/thirdparty/glm/glm/integer.hpp | 212 + .../manifold/thirdparty/glm/glm/mat2x2.hpp | 9 + .../manifold/thirdparty/glm/glm/mat2x3.hpp | 9 + .../manifold/thirdparty/glm/glm/mat2x4.hpp | 9 + .../manifold/thirdparty/glm/glm/mat3x2.hpp | 9 + .../manifold/thirdparty/glm/glm/mat3x3.hpp | 8 + .../manifold/thirdparty/glm/glm/mat3x4.hpp | 8 + .../manifold/thirdparty/glm/glm/mat4x2.hpp | 9 + .../manifold/thirdparty/glm/glm/mat4x3.hpp | 8 + .../manifold/thirdparty/glm/glm/mat4x4.hpp | 9 + .../manifold/thirdparty/glm/glm/matrix.hpp | 161 + .../manifold/thirdparty/glm/glm/packing.hpp | 173 + .../manifold/thirdparty/glm/glm/simd/common.h | 240 + .../thirdparty/glm/glm/simd/exponential.h | 20 + .../thirdparty/glm/glm/simd/geometric.h | 124 + .../thirdparty/glm/glm/simd/integer.h | 115 + .../manifold/thirdparty/glm/glm/simd/matrix.h | 1028 +++ .../manifold/thirdparty/glm/glm/simd/neon.h | 155 + .../thirdparty/glm/glm/simd/packing.h | 8 + .../thirdparty/glm/glm/simd/platform.h | 469 ++ .../thirdparty/glm/glm/simd/trigonometric.h | 9 + .../glm/glm/simd/vector_relational.h | 8 + .../thirdparty/glm/glm/trigonometric.hpp | 210 + .../manifold/thirdparty/glm/glm/vec2.hpp | 14 + .../manifold/thirdparty/glm/glm/vec3.hpp | 14 + .../manifold/thirdparty/glm/glm/vec4.hpp | 15 + .../thirdparty/glm/glm/vector_relational.hpp | 121 + .../manifold/thirdparty/thrust/.gitrepo | 12 + thirdparty/manifold/thirdparty/thrust/LICENSE | 249 + .../thrust/dependencies/libcudacxx/.gitrepo | 12 + .../dependencies/libcudacxx/LICENSE.TXT | 368 + .../libcudacxx/include/cuda/annotated_ptr | 337 + .../libcudacxx/include/cuda/atomic | 10 + .../libcudacxx/include/cuda/barrier | 10 + .../libcudacxx/include/cuda/latch | 10 + .../libcudacxx/include/cuda/pipeline | 635 ++ .../libcudacxx/include/cuda/semaphore | 10 + 
.../libcudacxx/include/cuda/std/array | 34 + .../libcudacxx/include/cuda/std/atomic | 296 + .../libcudacxx/include/cuda/std/barrier | 468 ++ .../libcudacxx/include/cuda/std/bit | 24 + .../libcudacxx/include/cuda/std/cassert | 28 + .../libcudacxx/include/cuda/std/ccomplex | 14 + .../libcudacxx/include/cuda/std/cfloat | 26 + .../libcudacxx/include/cuda/std/chrono | 83 + .../libcudacxx/include/cuda/std/climits | 106 + .../libcudacxx/include/cuda/std/cmath | 27 + .../libcudacxx/include/cuda/std/complex | 27 + .../libcudacxx/include/cuda/std/cstddef | 40 + .../libcudacxx/include/cuda/std/cstdint | 90 + .../libcudacxx/include/cuda/std/ctime | 27 + .../include/cuda/std/detail/__access_property | 323 + .../include/cuda/std/detail/__annotated_ptr | 227 + .../include/cuda/std/detail/__config | 196 + .../include/cuda/std/detail/__functional_base | 25 + .../include/cuda/std/detail/__pragma_pop | 11 + .../include/cuda/std/detail/__pragma_push | 12 + .../cuda/std/detail/__threading_support | 26 + .../std/detail/libcxx/include/CMakeLists.txt | 277 + .../std/detail/libcxx/include/__bit_reference | 1289 ++++ .../libcxx/include/__bsd_locale_defaults.h | 36 + .../libcxx/include/__bsd_locale_fallbacks.h | 139 + .../cuda/std/detail/libcxx/include/__config | 1943 +++++ .../detail/libcxx/include/__config_site.in | 36 + .../cuda/std/detail/libcxx/include/__debug | 284 + .../cuda/std/detail/libcxx/include/__errc | 217 + .../std/detail/libcxx/include/__functional_03 | 1595 ++++ .../detail/libcxx/include/__functional_base | 673 ++ .../libcxx/include/__functional_base_03 | 223 + .../std/detail/libcxx/include/__hash_table | 2914 +++++++ .../detail/libcxx/include/__libcpp_version | 1 + .../cuda/std/detail/libcxx/include/__locale | 1553 ++++ .../std/detail/libcxx/include/__mutex_base | 541 ++ .../std/detail/libcxx/include/__node_handle | 208 + .../cuda/std/detail/libcxx/include/__nullptr | 61 + .../std/detail/libcxx/include/__pragma_pop | 16 + .../std/detail/libcxx/include/__pragma_push | 25 + .../std/detail/libcxx/include/__split_buffer | 644 ++ .../std/detail/libcxx/include/__sso_allocator | 76 + .../std/detail/libcxx/include/__std_stream | 361 + .../cuda/std/detail/libcxx/include/__string | 985 +++ .../detail/libcxx/include/__threading_support | 787 ++ .../cuda/std/detail/libcxx/include/__tree | 2844 +++++++ .../cuda/std/detail/libcxx/include/__tuple | 567 ++ .../std/detail/libcxx/include/__undef_macros | 33 + .../cuda/std/detail/libcxx/include/algorithm | 5769 ++++++++++++++ .../cuda/std/detail/libcxx/include/any | 671 ++ .../cuda/std/detail/libcxx/include/array | 486 ++ .../cuda/std/detail/libcxx/include/atomic | 2763 +++++++ .../cuda/std/detail/libcxx/include/barrier | 384 + .../cuda/std/detail/libcxx/include/bit | 895 +++ .../cuda/std/detail/libcxx/include/bitset | 1109 +++ .../cuda/std/detail/libcxx/include/cassert | 25 + .../cuda/std/detail/libcxx/include/ccomplex | 28 + .../cuda/std/detail/libcxx/include/cctype | 120 + .../cuda/std/detail/libcxx/include/cerrno | 32 + .../cuda/std/detail/libcxx/include/cfenv | 81 + .../cuda/std/detail/libcxx/include/cfloat | 79 + .../cuda/std/detail/libcxx/include/charconv | 616 ++ .../cuda/std/detail/libcxx/include/chrono | 3348 ++++++++ .../cuda/std/detail/libcxx/include/cinttypes | 257 + .../cuda/std/detail/libcxx/include/ciso646 | 24 + .../cuda/std/detail/libcxx/include/climits | 62 + .../cuda/std/detail/libcxx/include/clocale | 54 + .../cuda/std/detail/libcxx/include/cmath | 725 ++ .../cuda/std/detail/libcxx/include/codecvt | 549 ++ 
.../cuda/std/detail/libcxx/include/compare | 678 ++ .../cuda/std/detail/libcxx/include/complex | 1590 ++++ .../cuda/std/detail/libcxx/include/complex.h | 36 + .../detail/libcxx/include/condition_variable | 268 + .../cuda/std/detail/libcxx/include/csetjmp | 47 + .../cuda/std/detail/libcxx/include/csignal | 57 + .../cuda/std/detail/libcxx/include/cstdarg | 54 + .../cuda/std/detail/libcxx/include/cstdbool | 38 + .../cuda/std/detail/libcxx/include/cstddef | 143 + .../cuda/std/detail/libcxx/include/cstdint | 197 + .../cuda/std/detail/libcxx/include/cstdio | 171 + .../cuda/std/detail/libcxx/include/cstdlib | 182 + .../cuda/std/detail/libcxx/include/cstring | 103 + .../cuda/std/detail/libcxx/include/ctgmath | 28 + .../cuda/std/detail/libcxx/include/ctime | 94 + .../cuda/std/detail/libcxx/include/ctype.h | 59 + .../cuda/std/detail/libcxx/include/cwchar | 192 + .../cuda/std/detail/libcxx/include/cwctype | 86 + .../cuda/std/detail/libcxx/include/deque | 3039 ++++++++ .../cuda/std/detail/libcxx/include/errno.h | 397 + .../cuda/std/detail/libcxx/include/exception | 329 + .../cuda/std/detail/libcxx/include/execution | 19 + .../libcxx/include/experimental/__config | 79 + .../libcxx/include/experimental/__memory | 89 + .../libcxx/include/experimental/algorithm | 59 + .../libcxx/include/experimental/coroutine | 334 + .../detail/libcxx/include/experimental/deque | 46 + .../libcxx/include/experimental/filesystem | 256 + .../libcxx/include/experimental/forward_list | 46 + .../libcxx/include/experimental/functional | 458 ++ .../libcxx/include/experimental/iterator | 113 + .../detail/libcxx/include/experimental/list | 46 + .../detail/libcxx/include/experimental/map | 56 + .../include/experimental/memory_resource | 426 ++ .../include/experimental/propagate_const | 578 ++ .../detail/libcxx/include/experimental/regex | 61 + .../detail/libcxx/include/experimental/set | 56 + .../detail/libcxx/include/experimental/simd | 1569 ++++ .../detail/libcxx/include/experimental/string | 61 + .../libcxx/include/experimental/type_traits | 154 + .../libcxx/include/experimental/unordered_map | 64 + .../libcxx/include/experimental/unordered_set | 58 + .../libcxx/include/experimental/utility | 46 + .../detail/libcxx/include/experimental/vector | 46 + .../cuda/std/detail/libcxx/include/ext/__hash | 133 + .../std/detail/libcxx/include/ext/hash_map | 984 +++ .../std/detail/libcxx/include/ext/hash_set | 659 ++ .../cuda/std/detail/libcxx/include/fenv.h | 116 + .../cuda/std/detail/libcxx/include/filesystem | 2644 +++++++ .../cuda/std/detail/libcxx/include/float.h | 97 + .../std/detail/libcxx/include/forward_list | 1781 +++++ .../cuda/std/detail/libcxx/include/fstream | 1763 +++++ .../cuda/std/detail/libcxx/include/functional | 3126 ++++++++ .../cuda/std/detail/libcxx/include/future | 2608 +++++++ .../detail/libcxx/include/initializer_list | 117 + .../cuda/std/detail/libcxx/include/inttypes.h | 262 + .../cuda/std/detail/libcxx/include/iomanip | 670 ++ .../cuda/std/detail/libcxx/include/ios | 1066 +++ .../cuda/std/detail/libcxx/include/iosfwd | 229 + .../cuda/std/detail/libcxx/include/iostream | 63 + .../cuda/std/detail/libcxx/include/istream | 1651 ++++ .../cuda/std/detail/libcxx/include/iterator | 1943 +++++ .../cuda/std/detail/libcxx/include/latch | 129 + .../cuda/std/detail/libcxx/include/limits | 837 ++ .../cuda/std/detail/libcxx/include/limits.h | 68 + .../cuda/std/detail/libcxx/include/list | 2488 ++++++ .../cuda/std/detail/libcxx/include/locale | 4353 +++++++++++ .../cuda/std/detail/libcxx/include/locale.h | 44 + 
.../cuda/std/detail/libcxx/include/map | 2246 ++++++ .../cuda/std/detail/libcxx/include/math.h | 1574 ++++ .../cuda/std/detail/libcxx/include/memory | 5443 +++++++++++++ .../detail/libcxx/include/module.modulemap | 612 ++ .../cuda/std/detail/libcxx/include/mutex | 711 ++ .../cuda/std/detail/libcxx/include/new | 378 + .../cuda/std/detail/libcxx/include/numeric | 593 ++ .../cuda/std/detail/libcxx/include/optional | 1420 ++++ .../cuda/std/detail/libcxx/include/ostream | 1107 +++ .../cuda/std/detail/libcxx/include/queue | 803 ++ .../cuda/std/detail/libcxx/include/random | 6745 +++++++++++++++++ .../cuda/std/detail/libcxx/include/ratio | 533 ++ .../cuda/std/detail/libcxx/include/regex | 6662 ++++++++++++++++ .../detail/libcxx/include/scoped_allocator | 683 ++ .../cuda/std/detail/libcxx/include/semaphore | 447 ++ .../cuda/std/detail/libcxx/include/set | 1493 ++++ .../cuda/std/detail/libcxx/include/setjmp.h | 44 + .../std/detail/libcxx/include/shared_mutex | 508 ++ .../cuda/std/detail/libcxx/include/span | 591 ++ .../cuda/std/detail/libcxx/include/sstream | 985 +++ .../cuda/std/detail/libcxx/include/stack | 321 + .../cuda/std/detail/libcxx/include/stdbool.h | 38 + .../cuda/std/detail/libcxx/include/stddef.h | 66 + .../cuda/std/detail/libcxx/include/stdexcept | 321 + .../cuda/std/detail/libcxx/include/stdint.h | 129 + .../cuda/std/detail/libcxx/include/stdio.h | 119 + .../cuda/std/detail/libcxx/include/stdlib.h | 103 + .../cuda/std/detail/libcxx/include/streambuf | 500 ++ .../cuda/std/detail/libcxx/include/string | 4365 +++++++++++ .../cuda/std/detail/libcxx/include/string.h | 109 + .../std/detail/libcxx/include/string_view | 840 ++ .../cuda/std/detail/libcxx/include/strstream | 399 + .../include/support/android/locale_bionic.h | 67 + .../include/support/atomic/atomic_base.h | 192 + .../include/support/atomic/atomic_c11.h | 183 + .../include/support/atomic/atomic_cuda.h | 482 ++ .../support/atomic/atomic_cuda_derived.h | 143 + .../support/atomic/atomic_cuda_generated.h | 2644 +++++++ .../include/support/atomic/atomic_gcc.h | 16 + .../include/support/atomic/atomic_msvc.h | 449 ++ .../include/support/atomic/atomic_nvrtc.h | 16 + .../include/support/atomic/atomic_scopes.h | 46 + .../include/support/atomic/cxx_atomic.h | 160 + .../libcxx/include/support/fuchsia/xlocale.h | 22 + .../libcxx/include/support/ibm/limits.h | 98 + .../include/support/ibm/locale_mgmt_aix.h | 84 + .../libcxx/include/support/ibm/support.h | 53 + .../libcxx/include/support/ibm/xlocale.h | 270 + .../libcxx/include/support/musl/xlocale.h | 57 + .../libcxx/include/support/newlib/xlocale.h | 27 + .../include/support/solaris/floatingpoint.h | 13 + .../libcxx/include/support/solaris/wchar.h | 46 + .../libcxx/include/support/solaris/xlocale.h | 76 + .../support/xlocale/__nop_locale_mgmt.h | 51 + .../support/xlocale/__posix_l_fallback.h | 164 + .../support/xlocale/__strtonum_fallback.h | 66 + .../std/detail/libcxx/include/system_error | 486 ++ .../cuda/std/detail/libcxx/include/tgmath.h | 36 + .../cuda/std/detail/libcxx/include/thread | 420 + .../cuda/std/detail/libcxx/include/tuple | 1547 ++++ .../std/detail/libcxx/include/type_traits | 4898 ++++++++++++ .../cuda/std/detail/libcxx/include/typeindex | 102 + .../cuda/std/detail/libcxx/include/typeinfo | 350 + .../std/detail/libcxx/include/unordered_map | 2445 ++++++ .../std/detail/libcxx/include/unordered_set | 1680 ++++ .../cuda/std/detail/libcxx/include/utility | 1650 ++++ .../cuda/std/detail/libcxx/include/valarray | 4930 ++++++++++++ .../cuda/std/detail/libcxx/include/variant | 1668 
++++ .../cuda/std/detail/libcxx/include/vector | 3406 +++++++++ .../cuda/std/detail/libcxx/include/version | 242 + .../cuda/std/detail/libcxx/include/wchar.h | 181 + .../cuda/std/detail/libcxx/include/wctype.h | 78 + .../libcudacxx/include/cuda/std/functional | 32 + .../include/cuda/std/initializer_list | 25 + .../libcudacxx/include/cuda/std/iterator | 29 + .../libcudacxx/include/cuda/std/latch | 40 + .../libcudacxx/include/cuda/std/limits | 29 + .../libcudacxx/include/cuda/std/ratio | 27 + .../libcudacxx/include/cuda/std/semaphore | 47 + .../libcudacxx/include/cuda/std/tuple | 33 + .../libcudacxx/include/cuda/std/type_traits | 28 + .../libcudacxx/include/cuda/std/utility | 27 + .../libcudacxx/include/cuda/std/version | 21 + .../include/nv/detail/__preprocessor | 117 + .../include/nv/detail/__target_macros | 493 ++ .../dependencies/libcudacxx/include/nv/target | 207 + .../thirdparty/thrust/thrust/addressof.h | 31 + .../thrust/thrust/adjacent_difference.h | 244 + .../thirdparty/thrust/thrust/advance.h | 140 + .../thrust/thrust/allocate_unique.h | 443 ++ .../thirdparty/thrust/thrust/async/copy.h | 154 + .../thirdparty/thrust/thrust/async/for_each.h | 123 + .../thirdparty/thrust/thrust/async/reduce.h | 446 ++ .../thirdparty/thrust/thrust/async/scan.h | 344 + .../thirdparty/thrust/thrust/async/sort.h | 280 + .../thrust/thrust/async/transform.h | 138 + .../thirdparty/thrust/thrust/binary_search.h | 1899 +++++ .../thirdparty/thrust/thrust/complex.h | 1047 +++ .../manifold/thirdparty/thrust/thrust/copy.h | 512 ++ .../manifold/thirdparty/thrust/thrust/count.h | 231 + .../thrust/detail/adjacent_difference.inl | 86 + .../thrust/thrust/detail/advance.inl | 74 + .../thrust/thrust/detail/algorithm_wrapper.h | 27 + .../thrust/thrust/detail/alignment.h | 230 + .../detail/allocator/allocator_traits.h | 442 ++ .../detail/allocator/allocator_traits.inl | 465 ++ .../detail/allocator/copy_construct_range.h | 46 + .../detail/allocator/copy_construct_range.inl | 310 + .../allocator/default_construct_range.h | 36 + .../allocator/default_construct_range.inl | 112 + .../thrust/detail/allocator/destroy_range.h | 33 + .../thrust/detail/allocator/destroy_range.inl | 167 + .../detail/allocator/fill_construct_range.h | 35 + .../detail/allocator/fill_construct_range.inl | 114 + .../detail/allocator/malloc_allocator.h | 51 + .../detail/allocator/malloc_allocator.inl | 65 + .../detail/allocator/no_throw_allocator.h | 72 + .../detail/allocator/tagged_allocator.h | 100 + .../detail/allocator/tagged_allocator.inl | 104 + .../detail/allocator/temporary_allocator.h | 84 + .../detail/allocator/temporary_allocator.inl | 79 + .../detail/allocator_aware_execution_policy.h | 101 + .../thrust/thrust/detail/binary_search.inl | 480 ++ .../thrust/thrust/detail/caching_allocator.h | 47 + .../thrust/thrust/detail/complex/arithmetic.h | 303 + .../thrust/thrust/detail/complex/c99math.h | 197 + .../thrust/thrust/detail/complex/catrig.h | 785 ++ .../thrust/thrust/detail/complex/catrigf.h | 500 ++ .../thrust/thrust/detail/complex/ccosh.h | 215 + .../thrust/thrust/detail/complex/ccoshf.h | 143 + .../thrust/thrust/detail/complex/cexp.h | 185 + .../thrust/thrust/detail/complex/cexpf.h | 163 + .../thrust/thrust/detail/complex/clog.h | 214 + .../thrust/thrust/detail/complex/clogf.h | 200 + .../thrust/thrust/detail/complex/complex.inl | 355 + .../thrust/thrust/detail/complex/cpow.h | 57 + .../thrust/thrust/detail/complex/cproj.h | 72 + .../thrust/thrust/detail/complex/csinh.h | 207 + .../thrust/thrust/detail/complex/csinhf.h | 144 + 
.../thrust/thrust/detail/complex/csqrt.h | 154 + .../thrust/thrust/detail/complex/csqrtf.h | 149 + .../thrust/thrust/detail/complex/ctanh.h | 202 + .../thrust/thrust/detail/complex/ctanhf.h | 126 + .../thrust/detail/complex/math_private.h | 136 + .../thrust/thrust/detail/complex/stream.h | 74 + .../thirdparty/thrust/thrust/detail/config.h | 24 + .../thrust/thrust/detail/config/compiler.h | 189 + .../thrust/detail/config/compiler_fence.h | 62 + .../thrust/thrust/detail/config/config.h | 40 + .../thrust/detail/config/cpp_compatibility.h | 101 + .../thrust/thrust/detail/config/cpp_dialect.h | 140 + .../thrust/thrust/detail/config/debug.h | 32 + .../thrust/thrust/detail/config/deprecated.h | 42 + .../thrust/detail/config/device_system.h | 44 + .../thrust/detail/config/exec_check_disable.h | 43 + .../thrust/thrust/detail/config/forceinline.h | 36 + .../thrust/detail/config/global_workarounds.h | 27 + .../thrust/thrust/detail/config/host_device.h | 44 + .../thrust/thrust/detail/config/host_system.h | 41 + .../thrust/detail/config/memory_resource.h | 35 + .../thrust/thrust/detail/config/namespace.h | 120 + .../thrust/detail/config/simple_defines.h | 30 + .../thrust/thrust/detail/contiguous_storage.h | 235 + .../thrust/detail/contiguous_storage.inl | 550 ++ .../thirdparty/thrust/thrust/detail/copy.h | 90 + .../thirdparty/thrust/thrust/detail/copy.inl | 129 + .../thirdparty/thrust/thrust/detail/copy_if.h | 71 + .../thrust/thrust/detail/copy_if.inl | 107 + .../thirdparty/thrust/thrust/detail/count.h | 60 + .../thirdparty/thrust/thrust/detail/count.inl | 77 + .../thrust/thrust/detail/cpp11_required.h | 26 + .../thrust/thrust/detail/cpp14_required.h | 26 + .../thirdparty/thrust/thrust/detail/cstdint.h | 83 + .../dependencies_aware_execution_policy.h | 106 + .../thrust/thrust/detail/device_delete.inl | 44 + .../thrust/thrust/detail/device_free.inl | 39 + .../thrust/thrust/detail/device_malloc.inl | 53 + .../thrust/thrust/detail/device_new.inl | 56 + .../thrust/thrust/detail/device_ptr.inl | 64 + .../thrust/thrust/detail/distance.inl | 35 + .../thirdparty/thrust/thrust/detail/equal.inl | 80 + .../thrust/thrust/detail/event_error.h | 164 + .../thrust/detail/execute_with_allocator.h | 149 + .../detail/execute_with_allocator_fwd.h | 106 + .../thrust/detail/execute_with_dependencies.h | 267 + .../thrust/thrust/detail/execution_policy.h | 76 + .../thrust/thrust/detail/extrema.inl | 169 + .../thirdparty/thrust/thrust/detail/fill.inl | 86 + .../thirdparty/thrust/thrust/detail/find.inl | 108 + .../thrust/thrust/detail/for_each.inl | 86 + .../thrust/thrust/detail/function.h | 161 + .../thrust/thrust/detail/functional.inl | 126 + .../thrust/thrust/detail/functional/actor.h | 155 + .../thrust/thrust/detail/functional/actor.inl | 114 + .../thrust/detail/functional/argument.h | 74 + .../thrust/detail/functional/composite.h | 164 + .../thrust/detail/functional/operators.h | 25 + .../operators/arithmetic_operators.h | 436 ++ .../operators/assignment_operator.h | 79 + .../functional/operators/bitwise_operators.h | 338 + .../operators/compound_assignment_operators.h | 512 ++ .../functional/operators/logical_operators.h | 143 + .../functional/operators/operator_adaptors.h | 136 + .../operators/relational_operators.h | 322 + .../thrust/detail/functional/placeholder.h | 38 + .../thrust/thrust/detail/functional/value.h | 79 + .../thrust/thrust/detail/gather.inl | 161 + .../thrust/thrust/detail/generate.inl | 92 + .../thrust/thrust/detail/get_iterator_value.h | 55 + .../thrust/thrust/detail/inner_product.inl | 101 + 
.../thrust/thrust/detail/integer_math.h | 152 + .../thrust/thrust/detail/integer_traits.h | 130 + .../thrust/detail/internal_functional.h | 558 ++ .../thrust/thrust/detail/logical.inl | 95 + .../thrust/thrust/detail/malloc_and_free.h | 83 + .../thrust/thrust/detail/memory_algorithms.h | 237 + .../thrust/thrust/detail/memory_wrapper.h | 30 + .../thirdparty/thrust/thrust/detail/merge.inl | 221 + .../thirdparty/thrust/thrust/detail/minmax.h | 51 + .../thrust/thrust/detail/mismatch.inl | 89 + .../thrust/detail/modern_gcc_required.h | 26 + .../thrust/thrust/detail/mpl/math.h | 175 + .../thrust/thrust/detail/numeric_traits.h | 129 + .../thrust/thrust/detail/numeric_wrapper.h | 27 + .../thrust/thrust/detail/overlapped_copy.h | 131 + .../thirdparty/thrust/thrust/detail/pair.inl | 231 + .../thrust/thrust/detail/partition.inl | 411 + .../thirdparty/thrust/thrust/detail/pointer.h | 255 + .../thrust/thrust/detail/pointer.inl | 209 + .../thrust/thrust/detail/preprocessor.h | 1182 +++ .../thrust/thrust/detail/range/head_flags.h | 229 + .../thrust/thrust/detail/range/tail_flags.h | 133 + .../thrust/thrust/detail/raw_pointer_cast.h | 50 + .../thrust/thrust/detail/raw_reference_cast.h | 332 + .../thrust/thrust/detail/reduce.inl | 276 + .../thrust/thrust/detail/reference.h | 518 ++ .../detail/reference_forward_declaration.h | 28 + .../thrust/thrust/detail/remove.inl | 246 + .../thrust/thrust/detail/replace.inl | 218 + .../thrust/thrust/detail/reverse.inl | 87 + .../thirdparty/thrust/thrust/detail/scan.inl | 522 ++ .../thrust/thrust/detail/scatter.inl | 163 + .../thrust/thrust/detail/select_system.h | 84 + .../thirdparty/thrust/thrust/detail/seq.h | 52 + .../thrust/thrust/detail/sequence.inl | 114 + .../thrust/thrust/detail/set_operations.inl | 865 +++ .../thrust/thrust/detail/shuffle.inl | 83 + .../thirdparty/thrust/thrust/detail/sort.inl | 404 + .../thrust/thrust/detail/static_assert.h | 91 + .../thrust/thrust/detail/static_map.h | 169 + .../thirdparty/thrust/thrust/detail/swap.h | 35 + .../thirdparty/thrust/thrust/detail/swap.inl | 22 + .../thrust/thrust/detail/swap_ranges.inl | 64 + .../thrust/thrust/detail/tabulate.inl | 58 + .../thrust/thrust/detail/temporary_array.h | 181 + .../thrust/thrust/detail/temporary_array.inl | 168 + .../thrust/thrust/detail/temporary_buffer.h | 75 + .../thrust/thrust/detail/transform.inl | 247 + .../thrust/thrust/detail/transform_reduce.inl | 68 + .../thrust/thrust/detail/transform_scan.inl | 117 + .../thrust/thrust/detail/trivial_sequence.h | 96 + .../thirdparty/thrust/thrust/detail/tuple.inl | 1004 +++ .../thrust/thrust/detail/tuple_algorithms.h | 110 + .../thrust/detail/tuple_meta_transform.h | 58 + .../thrust/thrust/detail/tuple_transform.h | 84 + .../thrust/thrust/detail/type_deduction.h | 90 + .../thrust/thrust/detail/type_traits.h | 717 ++ .../detail/type_traits/function_traits.h | 97 + .../detail/type_traits/has_member_function.h | 38 + .../detail/type_traits/has_nested_type.h | 32 + .../detail/type_traits/has_trivial_assign.h | 47 + .../detail/type_traits/is_call_possible.h | 162 + .../type_traits/is_metafunction_defined.h | 42 + .../iterator/is_discard_iterator.h | 39 + .../type_traits/iterator/is_output_iterator.h | 65 + .../thrust/detail/type_traits/minimum_type.h | 163 + .../detail/type_traits/pointer_traits.h | 395 + .../result_of_adaptable_function.h | 59 + .../thrust/detail/uninitialized_copy.inl | 95 + .../thrust/detail/uninitialized_fill.inl | 90 + .../thrust/thrust/detail/unique.inl | 393 + .../thrust/thrust/detail/use_default.h | 26 + 
.../thrust/thrust/detail/util/align.h | 60 + .../thrust/thrust/detail/vector_base.h | 603 ++ .../thrust/thrust/detail/vector_base.inl | 1345 ++++ .../thrust/thrust/device_allocator.h | 140 + .../thirdparty/thrust/thrust/device_delete.h | 53 + .../thirdparty/thrust/thrust/device_free.h | 65 + .../thrust/thrust/device_make_unique.h | 60 + .../thirdparty/thrust/thrust/device_malloc.h | 100 + .../thrust/thrust/device_malloc_allocator.h | 180 + .../thirdparty/thrust/thrust/device_new.h | 86 + .../thrust/thrust/device_new_allocator.h | 172 + .../thirdparty/thrust/thrust/device_ptr.h | 211 + .../thrust/thrust/device_reference.h | 987 +++ .../thirdparty/thrust/thrust/device_vector.h | 511 ++ .../thirdparty/thrust/thrust/distance.h | 74 + .../manifold/thirdparty/thrust/thrust/equal.h | 235 + .../manifold/thirdparty/thrust/thrust/event.h | 26 + .../thrust/thrust/execution_policy.h | 392 + .../thirdparty/thrust/thrust/extrema.h | 801 ++ .../manifold/thirdparty/thrust/thrust/fill.h | 206 + .../manifold/thirdparty/thrust/thrust/find.h | 381 + .../thirdparty/thrust/thrust/for_each.h | 278 + .../thirdparty/thrust/thrust/functional.h | 1719 +++++ .../thirdparty/thrust/thrust/future.h | 176 + .../thirdparty/thrust/thrust/gather.h | 445 ++ .../thirdparty/thrust/thrust/generate.h | 211 + .../thirdparty/thrust/thrust/host_vector.h | 534 ++ .../thirdparty/thrust/thrust/inner_product.h | 262 + .../thrust/iterator/constant_iterator.h | 250 + .../thrust/iterator/counting_iterator.h | 246 + .../thrust/iterator/detail/any_assign.h | 54 + .../thrust/iterator/detail/any_system_tag.h | 33 + .../iterator/detail/constant_iterator_base.h | 71 + .../iterator/detail/counting_iterator.inl | 142 + .../iterator/detail/device_system_tag.h | 30 + .../iterator/detail/discard_iterator_base.h | 64 + .../iterator/detail/distance_from_result.h | 41 + .../thrust/iterator/detail/host_system_tag.h | 30 + .../iterator/detail/is_iterator_category.h | 59 + .../iterator/detail/iterator_adaptor_base.h | 112 + .../detail/iterator_category_to_system.h | 79 + .../detail/iterator_category_to_traversal.h | 130 + ...rator_category_with_system_and_traversal.h | 56 + .../detail/iterator_facade_category.h | 252 + .../iterator/detail/iterator_traits.inl | 136 + .../iterator/detail/iterator_traversal_tags.h | 42 + .../thrust/iterator/detail/join_iterator.h | 133 + .../thrust/iterator/detail/minimum_category.h | 53 + .../thrust/iterator/detail/minimum_system.h | 81 + .../thrust/iterator/detail/normal_iterator.h | 79 + .../detail/permutation_iterator_base.h | 54 + .../thrust/thrust/iterator/detail/retag.h | 147 + .../iterator/detail/reverse_iterator.inl | 118 + .../iterator/detail/reverse_iterator_base.h | 43 + .../thrust/iterator/detail/tagged_iterator.h | 85 + .../transform_input_output_iterator.inl | 107 + .../iterator/detail/transform_iterator.inl | 75 + .../detail/transform_output_iterator.inl | 81 + .../detail/tuple_of_iterator_references.h | 146 + .../iterator/detail/universal_categories.h | 86 + .../thrust/iterator/detail/zip_iterator.inl | 152 + .../iterator/detail/zip_iterator_base.h | 353 + .../thrust/thrust/iterator/discard_iterator.h | 174 + .../thrust/thrust/iterator/iterator_adaptor.h | 242 + .../thrust/iterator/iterator_categories.h | 223 + .../thrust/thrust/iterator/iterator_facade.h | 542 ++ .../thrust/thrust/iterator/iterator_traits.h | 65 + .../thrust/iterator/permutation_iterator.h | 216 + .../thirdparty/thrust/thrust/iterator/retag.h | 69 + .../thrust/thrust/iterator/reverse_iterator.h | 237 + .../transform_input_output_iterator.h 
| 164 + .../thrust/iterator/transform_iterator.h | 355 + .../iterator/transform_output_iterator.h | 164 + .../thrust/thrust/iterator/zip_iterator.h | 258 + .../thirdparty/thrust/thrust/limits.h | 18 + .../thirdparty/thrust/thrust/logical.h | 275 + .../thirdparty/thrust/thrust/memory.h | 396 + .../manifold/thirdparty/thrust/thrust/merge.h | 677 ++ .../thirdparty/thrust/thrust/mismatch.h | 257 + .../thirdparty/thrust/thrust/mr/allocator.h | 253 + .../thrust/thrust/mr/device_memory_resource.h | 38 + .../thrust/thrust/mr/disjoint_pool.h | 489 ++ .../thrust/thrust/mr/disjoint_sync_pool.h | 117 + .../thrust/thrust/mr/disjoint_tls_pool.h | 69 + .../thrust/thrust/mr/fancy_pointer_resource.h | 61 + .../thrust/thrust/mr/host_memory_resource.h | 32 + .../thrust/thrust/mr/memory_resource.h | 217 + .../thirdparty/thrust/thrust/mr/new.h | 89 + .../thrust/thrust/mr/polymorphic_adaptor.h | 57 + .../thirdparty/thrust/thrust/mr/pool.h | 507 ++ .../thrust/thrust/mr/pool_options.h | 128 + .../thirdparty/thrust/thrust/mr/sync_pool.h | 114 + .../thirdparty/thrust/thrust/mr/tls_pool.h | 64 + .../thrust/mr/universal_memory_resource.h | 22 + .../thirdparty/thrust/thrust/mr/validator.h | 51 + .../thirdparty/thrust/thrust/optional.h | 2876 +++++++ .../manifold/thirdparty/thrust/thrust/pair.h | 281 + .../thirdparty/thrust/thrust/partition.h | 1436 ++++ .../thrust/thrust/per_device_resource.h | 102 + .../thirdparty/thrust/thrust/random.h | 117 + .../thirdparty/thrust/thrust/reduce.h | 781 ++ .../thirdparty/thrust/thrust/remove.h | 802 ++ .../thirdparty/thrust/thrust/replace.h | 819 ++ .../thirdparty/thrust/thrust/reverse.h | 211 + .../manifold/thirdparty/thrust/thrust/scan.h | 1656 ++++ .../thirdparty/thrust/thrust/scatter.h | 419 + .../thirdparty/thrust/thrust/sequence.h | 293 + .../thirdparty/thrust/thrust/set_operations.h | 2959 ++++++++ .../thirdparty/thrust/thrust/shuffle.h | 179 + .../manifold/thirdparty/thrust/thrust/sort.h | 1358 ++++ .../manifold/thirdparty/thrust/thrust/swap.h | 184 + .../system/cpp/detail/adjacent_difference.h | 23 + .../thrust/system/cpp/detail/assign_value.h | 23 + .../thrust/system/cpp/detail/binary_search.h | 23 + .../thrust/thrust/system/cpp/detail/copy.h | 23 + .../thrust/thrust/system/cpp/detail/copy_if.h | 23 + .../thrust/thrust/system/cpp/detail/count.h | 22 + .../thrust/thrust/system/cpp/detail/equal.h | 22 + .../system/cpp/detail/execution_policy.h | 80 + .../thrust/thrust/system/cpp/detail/extrema.h | 23 + .../thrust/thrust/system/cpp/detail/fill.h | 22 + .../thrust/thrust/system/cpp/detail/find.h | 23 + .../thrust/system/cpp/detail/for_each.h | 23 + .../thrust/thrust/system/cpp/detail/gather.h | 22 + .../thrust/system/cpp/detail/generate.h | 22 + .../thrust/system/cpp/detail/get_value.h | 23 + .../thrust/system/cpp/detail/inner_product.h | 22 + .../thrust/system/cpp/detail/iter_swap.h | 23 + .../thrust/thrust/system/cpp/detail/logical.h | 22 + .../system/cpp/detail/malloc_and_free.h | 23 + .../thrust/system/cpp/detail/memory.inl | 52 + .../thrust/thrust/system/cpp/detail/merge.h | 23 + .../thrust/system/cpp/detail/mismatch.h | 22 + .../thrust/thrust/system/cpp/detail/par.h | 61 + .../thrust/system/cpp/detail/partition.h | 23 + .../system/cpp/detail/per_device_resource.h | 22 + .../thrust/thrust/system/cpp/detail/reduce.h | 23 + .../thrust/system/cpp/detail/reduce_by_key.h | 23 + .../thrust/thrust/system/cpp/detail/remove.h | 23 + .../thrust/thrust/system/cpp/detail/replace.h | 22 + .../thrust/thrust/system/cpp/detail/reverse.h | 22 + .../thrust/thrust/system/cpp/detail/scan.h | 
23 + .../thrust/system/cpp/detail/scan_by_key.h | 23 + .../thrust/thrust/system/cpp/detail/scatter.h | 22 + .../thrust/system/cpp/detail/sequence.h | 22 + .../thrust/system/cpp/detail/set_operations.h | 23 + .../thrust/thrust/system/cpp/detail/sort.h | 23 + .../thrust/system/cpp/detail/swap_ranges.h | 22 + .../thrust/system/cpp/detail/tabulate.h | 22 + .../system/cpp/detail/temporary_buffer.h | 22 + .../thrust/system/cpp/detail/transform.h | 22 + .../system/cpp/detail/transform_reduce.h | 22 + .../thrust/system/cpp/detail/transform_scan.h | 22 + .../system/cpp/detail/uninitialized_copy.h | 22 + .../system/cpp/detail/uninitialized_fill.h | 22 + .../thrust/thrust/system/cpp/detail/unique.h | 23 + .../thrust/system/cpp/detail/unique_by_key.h | 23 + .../thrust/system/cpp/detail/vector.inl | 146 + .../thrust/system/cpp/execution_policy.h | 156 + .../thrust/thrust/system/cpp/memory.h | 100 + .../thrust/system/cpp/memory_resource.h | 71 + .../thrust/thrust/system/cpp/pointer.h | 117 + .../thrust/thrust/system/cpp/vector.h | 82 + .../system/detail/adl/adjacent_difference.h | 44 + .../thrust/system/detail/adl/assign_value.h | 44 + .../thrust/system/detail/adl/async/copy.h | 34 + .../thrust/system/detail/adl/async/for_each.h | 34 + .../thrust/system/detail/adl/async/reduce.h | 34 + .../thrust/system/detail/adl/async/scan.h | 34 + .../thrust/system/detail/adl/async/sort.h | 34 + .../system/detail/adl/async/transform.h | 34 + .../thrust/system/detail/adl/binary_search.h | 44 + .../thrust/thrust/system/detail/adl/copy.h | 44 + .../thrust/thrust/system/detail/adl/copy_if.h | 44 + .../thrust/thrust/system/detail/adl/count.h | 44 + .../thrust/thrust/system/detail/adl/equal.h | 44 + .../thrust/thrust/system/detail/adl/extrema.h | 44 + .../thrust/thrust/system/detail/adl/fill.h | 44 + .../thrust/thrust/system/detail/adl/find.h | 44 + .../thrust/system/detail/adl/for_each.h | 44 + .../thrust/thrust/system/detail/adl/gather.h | 44 + .../thrust/system/detail/adl/generate.h | 44 + .../thrust/system/detail/adl/get_value.h | 44 + .../thrust/system/detail/adl/inner_product.h | 44 + .../thrust/system/detail/adl/iter_swap.h | 44 + .../thrust/thrust/system/detail/adl/logical.h | 44 + .../system/detail/adl/malloc_and_free.h | 44 + .../thrust/thrust/system/detail/adl/merge.h | 44 + .../thrust/system/detail/adl/mismatch.h | 44 + .../thrust/system/detail/adl/partition.h | 44 + .../system/detail/adl/per_device_resource.h | 41 + .../thrust/thrust/system/detail/adl/reduce.h | 44 + .../thrust/system/detail/adl/reduce_by_key.h | 44 + .../thrust/thrust/system/detail/adl/remove.h | 44 + .../thrust/thrust/system/detail/adl/replace.h | 44 + .../thrust/thrust/system/detail/adl/reverse.h | 44 + .../thrust/thrust/system/detail/adl/scan.h | 44 + .../thrust/system/detail/adl/scan_by_key.h | 44 + .../thrust/thrust/system/detail/adl/scatter.h | 44 + .../thrust/system/detail/adl/sequence.h | 44 + .../thrust/system/detail/adl/set_operations.h | 44 + .../thrust/thrust/system/detail/adl/sort.h | 44 + .../thrust/system/detail/adl/swap_ranges.h | 44 + .../thrust/system/detail/adl/tabulate.h | 44 + .../system/detail/adl/temporary_buffer.h | 44 + .../thrust/system/detail/adl/transform.h | 44 + .../system/detail/adl/transform_reduce.h | 44 + .../thrust/system/detail/adl/transform_scan.h | 44 + .../system/detail/adl/uninitialized_copy.h | 44 + .../system/detail/adl/uninitialized_fill.h | 44 + .../thrust/thrust/system/detail/adl/unique.h | 44 + .../thrust/system/detail/adl/unique_by_key.h | 44 + .../thrust/thrust/system/detail/bad_alloc.h | 58 + 
.../thrust/thrust/system/detail/errno.h | 119 + .../thrust/system/detail/error_category.inl | 237 + .../thrust/system/detail/error_code.inl | 198 + .../thrust/system/detail/error_condition.inl | 134 + .../detail/generic/adjacent_difference.h | 57 + .../detail/generic/adjacent_difference.inl | 82 + .../thrust/system/detail/generic/advance.h | 40 + .../thrust/system/detail/generic/advance.inl | 68 + .../system/detail/generic/binary_search.h | 173 + .../system/detail/generic/binary_search.inl | 396 + .../thrust/system/detail/generic/copy.h | 58 + .../thrust/system/detail/generic/copy.inl | 80 + .../thrust/system/detail/generic/copy_if.h | 63 + .../thrust/system/detail/generic/copy_if.inl | 160 + .../thrust/system/detail/generic/count.h | 50 + .../thrust/system/detail/generic/count.inl | 82 + .../thrust/system/detail/generic/distance.h | 42 + .../thrust/system/detail/generic/distance.inl | 80 + .../thrust/system/detail/generic/equal.h | 47 + .../thrust/system/detail/generic/equal.inl | 59 + .../thrust/system/detail/generic/extrema.h | 88 + .../thrust/system/detail/generic/extrema.inl | 262 + .../thrust/system/detail/generic/fill.h | 61 + .../thrust/system/detail/generic/find.h | 62 + .../thrust/system/detail/generic/find.inl | 151 + .../thrust/system/detail/generic/for_each.h | 77 + .../thrust/system/detail/generic/gather.h | 80 + .../thrust/system/detail/generic/gather.inl | 108 + .../thrust/system/detail/generic/generate.h | 56 + .../thrust/system/detail/generic/generate.inl | 100 + .../system/detail/generic/inner_product.h | 58 + .../system/detail/generic/inner_product.inl | 73 + .../thrust/system/detail/generic/logical.h | 62 + .../thrust/system/detail/generic/memory.h | 70 + .../thrust/system/detail/generic/memory.inl | 105 + .../thrust/system/detail/generic/merge.h | 90 + .../thrust/system/detail/generic/merge.inl | 130 + .../thrust/system/detail/generic/mismatch.h | 57 + .../thrust/system/detail/generic/mismatch.inl | 75 + .../thrust/system/detail/generic/partition.h | 169 + .../system/detail/generic/partition.inl | 249 + .../detail/generic/per_device_resource.h | 46 + .../thrust/system/detail/generic/reduce.h | 58 + .../thrust/system/detail/generic/reduce.inl | 80 + .../system/detail/generic/reduce_by_key.h | 88 + .../system/detail/generic/reduce_by_key.inl | 193 + .../thrust/system/detail/generic/remove.h | 112 + .../thrust/system/detail/generic/remove.inl | 146 + .../thrust/system/detail/generic/replace.h | 97 + .../thrust/system/detail/generic/replace.inl | 179 + .../thrust/system/detail/generic/reverse.h | 55 + .../thrust/system/detail/generic/reverse.inl | 76 + .../detail/generic/scalar/binary_search.h | 84 + .../detail/generic/scalar/binary_search.inl | 157 + .../thrust/system/detail/generic/scan.h | 98 + .../thrust/system/detail/generic/scan.inl | 127 + .../system/detail/generic/scan_by_key.h | 143 + .../system/detail/generic/scan_by_key.inl | 245 + .../thrust/system/detail/generic/scatter.h | 80 + .../thrust/system/detail/generic/scatter.inl | 97 + .../system/detail/generic/select_system.h | 124 + .../system/detail/generic/select_system.inl | 178 + .../detail/generic/select_system_exists.h | 167 + .../thrust/system/detail/generic/sequence.h | 63 + .../thrust/system/detail/generic/sequence.inl | 106 + .../system/detail/generic/set_operations.h | 318 + .../system/detail/generic/set_operations.inl | 476 ++ .../thrust/system/detail/generic/shuffle.h | 54 + .../thrust/system/detail/generic/shuffle.inl | 190 + .../thrust/system/detail/generic/sort.h | 153 + 
.../thrust/system/detail/generic/sort.inl | 219 + .../system/detail/generic/swap_ranges.h | 46 + .../system/detail/generic/swap_ranges.inl | 79 + .../thrust/system/detail/generic/tabulate.h | 48 + .../thrust/system/detail/generic/tabulate.inl | 61 + .../thrust/thrust/system/detail/generic/tag.h | 47 + .../system/detail/generic/temporary_buffer.h | 57 + .../detail/generic/temporary_buffer.inl | 86 + .../thrust/system/detail/generic/transform.h | 105 + .../system/detail/generic/transform.inl | 191 + .../system/detail/generic/transform_reduce.h | 52 + .../detail/generic/transform_reduce.inl | 57 + .../system/detail/generic/transform_scan.h | 67 + .../system/detail/generic/transform_scan.inl | 91 + .../detail/generic/uninitialized_copy.h | 56 + .../detail/generic/uninitialized_copy.inl | 194 + .../detail/generic/uninitialized_fill.h | 56 + .../detail/generic/uninitialized_fill.inl | 135 + .../thrust/system/detail/generic/unique.h | 97 + .../thrust/system/detail/generic/unique.inl | 139 + .../system/detail/generic/unique_by_key.h | 94 + .../system/detail/generic/unique_by_key.inl | 139 + .../thrust/system/detail/internal/decompose.h | 113 + .../detail/sequential/adjacent_difference.h | 73 + .../system/detail/sequential/assign_value.h | 42 + .../system/detail/sequential/binary_search.h | 158 + .../thrust/system/detail/sequential/copy.h | 62 + .../thrust/system/detail/sequential/copy.inl | 146 + .../system/detail/sequential/copy_backward.h | 53 + .../thrust/system/detail/sequential/copy_if.h | 72 + .../thrust/system/detail/sequential/count.h | 22 + .../thrust/system/detail/sequential/equal.h | 22 + .../detail/sequential/execution_policy.h | 75 + .../thrust/system/detail/sequential/extrema.h | 138 + .../thrust/system/detail/sequential/fill.h | 22 + .../thrust/system/detail/sequential/find.h | 70 + .../system/detail/sequential/for_each.h | 94 + .../thrust/system/detail/sequential/gather.h | 22 + .../system/detail/sequential/general_copy.h | 146 + .../system/detail/sequential/generate.h | 22 + .../system/detail/sequential/get_value.h | 45 + .../system/detail/sequential/inner_product.h | 22 + .../system/detail/sequential/insertion_sort.h | 152 + .../system/detail/sequential/iter_swap.h | 46 + .../thrust/system/detail/sequential/logical.h | 22 + .../detail/sequential/malloc_and_free.h | 53 + .../thrust/system/detail/sequential/merge.h | 79 + .../thrust/system/detail/sequential/merge.inl | 154 + .../system/detail/sequential/mismatch.h | 22 + .../system/detail/sequential/partition.h | 340 + .../detail/sequential/per_device_resource.h | 22 + .../thrust/system/detail/sequential/reduce.h | 72 + .../system/detail/sequential/reduce_by_key.h | 102 + .../thrust/system/detail/sequential/remove.h | 201 + .../thrust/system/detail/sequential/replace.h | 22 + .../thrust/system/detail/sequential/reverse.h | 22 + .../thrust/system/detail/sequential/scan.h | 121 + .../system/detail/sequential/scan_by_key.h | 149 + .../thrust/system/detail/sequential/scatter.h | 22 + .../system/detail/sequential/sequence.h | 22 + .../system/detail/sequential/set_operations.h | 223 + .../thrust/system/detail/sequential/sort.h | 63 + .../thrust/system/detail/sequential/sort.inl | 208 + .../detail/sequential/stable_merge_sort.h | 59 + .../detail/sequential/stable_merge_sort.inl | 393 + .../detail/sequential/stable_primitive_sort.h | 55 + .../sequential/stable_primitive_sort.inl | 160 + .../detail/sequential/stable_radix_sort.h | 55 + .../detail/sequential/stable_radix_sort.inl | 597 ++ .../system/detail/sequential/swap_ranges.h | 22 + 
.../system/detail/sequential/tabulate.h | 22 +
.../detail/sequential/temporary_buffer.h | 22 +
.../system/detail/sequential/transform.h | 22 +
.../detail/sequential/transform_reduce.h | 22 +
.../system/detail/sequential/transform_scan.h | 22 +
.../system/detail/sequential/trivial_copy.h | 61 +
.../detail/sequential/uninitialized_copy.h | 22 +
.../detail/sequential/uninitialized_fill.h | 22 +
.../thrust/system/detail/sequential/unique.h | 130 +
.../system/detail/sequential/unique_by_key.h | 115 +
.../thrust/system/detail/system_error.inl | 112 +
.../thrust/thrust/system/error_code.h | 522 ++
.../thrust/thrust/system/system_error.h | 178 +
.../thirdparty/thrust/thrust/system_error.h | 49 +
.../thirdparty/thrust/thrust/tabulate.h | 125 +
.../thirdparty/thrust/thrust/transform.h | 721 ++
.../thrust/thrust/transform_reduce.h | 194 +
.../thirdparty/thrust/thrust/transform_scan.h | 320 +
.../manifold/thirdparty/thrust/thrust/tuple.h | 575 ++
.../thrust/type_traits/integer_sequence.h | 381 +
.../type_traits/is_contiguous_iterator.h | 295 +
.../thrust/type_traits/is_execution_policy.h | 69 +
...operator_less_or_greater_function_object.h | 208 +
.../is_operator_plus_function_object.h | 118 +
.../type_traits/is_trivially_relocatable.h | 366 +
.../type_traits/logical_metafunctions.h | 287 +
.../thrust/thrust/type_traits/remove_cvref.h | 97 +
.../thrust/thrust/type_traits/void_t.h | 77 +
.../thrust/thrust/uninitialized_copy.h | 299 +
.../thrust/thrust/uninitialized_fill.h | 272 +
.../thirdparty/thrust/thrust/unique.h | 1140 +++
.../thrust/thrust/universal_allocator.h | 77 +
.../thirdparty/thrust/thrust/universal_ptr.h | 26 +
.../thrust/thrust/universal_vector.h | 55 +
.../thirdparty/thrust/thrust/version.h | 75 +
.../thirdparty/thrust/thrust/zip_function.h | 212 +
1306 files changed, 332430 insertions(+), 1761 deletions(-)
delete mode 100644 modules/csg/csg.cpp
delete mode 100644 modules/csg/csg.h
create mode 100644 thirdparty/manifold/.gitrepo
create mode 100644 thirdparty/manifold/AUTHORS
create mode 100644 thirdparty/manifold/LICENSE
create mode 100644 thirdparty/manifold/src/collider/include/collider.h
create mode 100644 thirdparty/manifold/src/collider/src/collider.cpp
create mode 100644 thirdparty/manifold/src/cross_section/include/cross_section.h
create mode 100644 thirdparty/manifold/src/cross_section/src/cross_section.cpp
create mode 100644 thirdparty/manifold/src/manifold/include/manifold.h
create mode 100644 thirdparty/manifold/src/manifold/src/boolean3.cpp
create mode 100644 thirdparty/manifold/src/manifold/src/boolean3.h
create mode 100644 thirdparty/manifold/src/manifold/src/boolean_result.cpp
create mode 100644 thirdparty/manifold/src/manifold/src/constructors.cpp
create mode 100644 thirdparty/manifold/src/manifold/src/csg_tree.cpp
create mode 100644 thirdparty/manifold/src/manifold/src/csg_tree.h
create mode 100644 thirdparty/manifold/src/manifold/src/edge_op.cpp
create mode 100644 thirdparty/manifold/src/manifold/src/face_op.cpp
create mode 100644 thirdparty/manifold/src/manifold/src/impl.cpp
create mode 100644 thirdparty/manifold/src/manifold/src/impl.h
create mode 100644 thirdparty/manifold/src/manifold/src/manifold.cpp
create mode 100644 thirdparty/manifold/src/manifold/src/mesh_fixes.h
create mode 100644 thirdparty/manifold/src/manifold/src/properties.cpp
create mode 100644 thirdparty/manifold/src/manifold/src/shared.h
create mode 100644 thirdparty/manifold/src/manifold/src/smoothing.cpp
create mode 100644 thirdparty/manifold/src/manifold/src/sort.cpp
create mode 100644
thirdparty/manifold/src/manifold/src/subdivision.cpp create mode 100644 thirdparty/manifold/src/polygon/include/polygon.h create mode 100644 thirdparty/manifold/src/polygon/src/polygon.cpp create mode 100644 thirdparty/manifold/src/sdf/include/sdf.h create mode 100644 thirdparty/manifold/src/sdf/src/sdf.cpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/.gitignore create mode 100644 thirdparty/manifold/src/third_party/quickhull/.gitrepo create mode 100644 thirdparty/manifold/src/third_party/quickhull/ConvexHull.hpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/HalfEdgeMesh.hpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/MathUtils.hpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/QuickHull.cpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/QuickHull.hpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/README.md create mode 100644 thirdparty/manifold/src/third_party/quickhull/Structs/Mesh.hpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/Structs/Plane.hpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/Structs/Pool.hpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/Structs/Ray.hpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/Structs/Vector3.hpp create mode 100644 thirdparty/manifold/src/third_party/quickhull/Structs/VertexDataSource.hpp create mode 100644 thirdparty/manifold/src/utilities/include/hashtable.h create mode 100644 thirdparty/manifold/src/utilities/include/optional_assert.h create mode 100644 thirdparty/manifold/src/utilities/include/par.h create mode 100644 thirdparty/manifold/src/utilities/include/public.h create mode 100644 thirdparty/manifold/src/utilities/include/sparse.h create mode 100644 thirdparty/manifold/src/utilities/include/svd.h create mode 100644 thirdparty/manifold/src/utilities/include/tri_dist.h create mode 100644 thirdparty/manifold/src/utilities/include/utils.h create mode 100644 thirdparty/manifold/src/utilities/include/vec.h create mode 100644 thirdparty/manifold/src/utilities/include/vec_view.h create mode 100644 thirdparty/manifold/thirdparty/glm/.gitrepo create mode 100644 thirdparty/manifold/thirdparty/glm/copying.txt create mode 100644 thirdparty/manifold/thirdparty/glm/glm/CMakeLists.txt create mode 100644 thirdparty/manifold/thirdparty/glm/glm/common.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/_features.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/_fixes.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/_noise.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/_swizzle.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/_swizzle_func.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/_vectorize.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/compute_common.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/compute_vector_relational.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_common.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_common_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_exponential.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_exponential_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_geometric.inl create mode 100644 
thirdparty/manifold/thirdparty/glm/glm/detail/func_geometric_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_integer.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_integer_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_matrix.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_matrix_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_packing.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_packing_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_trigonometric.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_trigonometric_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_vector_relational.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/func_vector_relational_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/glm.cpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/qualifier.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/setup.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_float.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_half.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_half.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x3.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x4.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x2.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x3.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x4.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x2.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x3.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_quat.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_quat.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_quat_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_vec1.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_vec1.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_vec2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_vec2.inl create mode 100644 
thirdparty/manifold/thirdparty/glm/glm/detail/type_vec3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_vec3.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/exponential.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/_matrix_vectorize.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_clip_space.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_clip_space.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_common.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_common.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x2_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x3_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x4_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x2_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x3_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x4_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x2_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x3_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x4_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x2_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x3_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x4_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x2_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x3_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x4.hpp create mode 100644 
thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x4_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x2_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x3_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x4_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x2_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x3_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x4_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x2_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x3_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x4_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x2_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x3_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x4_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_integer.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_integer.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_projection.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_projection.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_relational.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_relational.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_transform.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_transform.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x2_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x3_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x4_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x2_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x3.hpp create mode 100644 
thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x3_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x4_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x2_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x3_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x4_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_double.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_double_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_exponential.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_exponential.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_float.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_float_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_geometric.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_geometric.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_relational.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_relational.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_transform.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_transform.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_trigonometric.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_trigonometric.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_common.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_common.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_constants.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_constants.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_int_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_integer.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_integer.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_packing.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_packing.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_reciprocal.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_reciprocal.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_relational.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_relational.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_uint_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_ulp.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/scalar_ulp.inl create mode 
100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool1.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool1_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool2_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool3_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool4_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_common.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_common.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_double1.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_double1_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_double2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_double2_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_double3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_double3_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_double4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_double4_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_float1.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_float1_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_float2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_float2_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_float3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_float3_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_float4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_float4_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_int1.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_int1_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_int2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_int2_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_int3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_int3_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_int4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_int4_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_integer.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_integer.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_packing.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_packing.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_reciprocal.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_reciprocal.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_relational.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_relational.inl create mode 100644 
thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint1.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint1_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint2_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint3_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint4_sized.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_ulp.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/ext/vector_ulp.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/fwd.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/geometric.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/glm.cppm create mode 100644 thirdparty/manifold/thirdparty/glm/glm/glm.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/bitfield.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/bitfield.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/color_space.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/color_space.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/constants.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/constants.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/epsilon.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/epsilon.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/integer.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/integer.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_access.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_access.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_integer.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_inverse.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_inverse.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_transform.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_transform.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/noise.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/noise.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/packing.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/packing.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion_simd.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/random.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/random.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/reciprocal.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/round.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/round.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/type_aligned.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/type_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/type_precision.inl create mode 100644 
thirdparty/manifold/thirdparty/glm/glm/gtc/type_ptr.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/type_ptr.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/ulp.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/ulp.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtc/vec1.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/associated_min_max.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/associated_min_max.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/bit.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/bit.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/closest_point.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/closest_point.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/color_encoding.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/color_encoding.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/color_space.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/color_space.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/color_space_YCoCg.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/color_space_YCoCg.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/common.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/common.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/compatibility.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/compatibility.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/component_wise.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/component_wise.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/dual_quaternion.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/dual_quaternion.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/easing.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/easing.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/euler_angles.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/euler_angles.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/extend.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/extend.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/extended_min_max.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/extended_min_max.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/exterior_product.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/exterior_product.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/fast_exponential.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/fast_exponential.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/fast_square_root.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/fast_square_root.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/fast_trigonometry.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/fast_trigonometry.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/float_notmalize.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/functions.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/functions.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/gradient_paint.hpp create mode 100644 
thirdparty/manifold/thirdparty/glm/glm/gtx/gradient_paint.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/handed_coordinate_space.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/handed_coordinate_space.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/hash.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/hash.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/integer.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/integer.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/intersect.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/intersect.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/io.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/io.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/log_base.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/log_base.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_cross_product.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_cross_product.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_decompose.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_decompose.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_factorisation.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_factorisation.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_interpolation.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_interpolation.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_major_storage.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_major_storage.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_operation.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_operation.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_query.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_query.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_transform_2d.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_transform_2d.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/mixed_product.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/mixed_product.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/norm.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/norm.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/normal.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/normal.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/normalize_dot.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/normalize_dot.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/number_precision.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/optimum_pow.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/optimum_pow.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/orthonormalize.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/orthonormalize.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/pca.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/pca.inl create mode 100644 
thirdparty/manifold/thirdparty/glm/glm/gtx/perpendicular.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/perpendicular.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/polar_coordinates.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/polar_coordinates.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/projection.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/projection.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/quaternion.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/quaternion.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/range.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/raw_data.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/raw_data.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_normalized_axis.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_normalized_axis.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_vector.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_vector.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_multiplication.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_relational.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_relational.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/spline.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/spline.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/std_based_type.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/std_based_type.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/string_cast.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/string_cast.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/texture.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/texture.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/transform.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/transform.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/transform2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/transform2.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/type_aligned.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/type_aligned.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/type_trait.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/type_trait.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/vec_swizzle.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/vector_angle.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/vector_angle.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/vector_query.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/vector_query.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/wrap.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/gtx/wrap.inl create mode 100644 thirdparty/manifold/thirdparty/glm/glm/integer.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/mat2x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/mat2x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/mat2x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/mat3x2.hpp create 
mode 100644 thirdparty/manifold/thirdparty/glm/glm/mat3x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/mat3x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/mat4x2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/mat4x3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/mat4x4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/matrix.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/packing.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/simd/common.h create mode 100644 thirdparty/manifold/thirdparty/glm/glm/simd/exponential.h create mode 100644 thirdparty/manifold/thirdparty/glm/glm/simd/geometric.h create mode 100644 thirdparty/manifold/thirdparty/glm/glm/simd/integer.h create mode 100644 thirdparty/manifold/thirdparty/glm/glm/simd/matrix.h create mode 100644 thirdparty/manifold/thirdparty/glm/glm/simd/neon.h create mode 100644 thirdparty/manifold/thirdparty/glm/glm/simd/packing.h create mode 100644 thirdparty/manifold/thirdparty/glm/glm/simd/platform.h create mode 100644 thirdparty/manifold/thirdparty/glm/glm/simd/trigonometric.h create mode 100644 thirdparty/manifold/thirdparty/glm/glm/simd/vector_relational.h create mode 100644 thirdparty/manifold/thirdparty/glm/glm/trigonometric.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/vec2.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/vec3.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/vec4.hpp create mode 100644 thirdparty/manifold/thirdparty/glm/glm/vector_relational.hpp create mode 100644 thirdparty/manifold/thirdparty/thrust/.gitrepo create mode 100644 thirdparty/manifold/thirdparty/thrust/LICENSE create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/.gitrepo create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/LICENSE.TXT create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/annotated_ptr create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/atomic create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/barrier create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/latch create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/pipeline create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/semaphore create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/array create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/atomic create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/barrier create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/bit create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cassert create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/ccomplex create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cfloat create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/chrono create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/climits create mode 100644 
thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cmath create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/complex create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cstddef create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cstdint create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/ctime create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__access_property create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__annotated_ptr create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__config create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__functional_base create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__pragma_pop create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__pragma_push create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__threading_support create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/CMakeLists.txt create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bit_reference create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bsd_locale_defaults.h create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bsd_locale_fallbacks.h create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__config create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__config_site.in create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__debug create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__errc create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_03 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base_03 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__hash_table create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__libcpp_version create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__locale create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__mutex_base create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__node_handle create mode 100644 
thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__nullptr create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_pop create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_push create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__split_buffer create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__sso_allocator create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__std_stream create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__string create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__threading_support create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__tree create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__tuple create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__undef_macros create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/algorithm create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/any create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/array create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/atomic create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/barrier create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/bit create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/bitset create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cassert create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ccomplex create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cctype create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cerrno create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cfenv create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cfloat create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/charconv create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/chrono create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cinttypes create mode 100644 
thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ciso646
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/climits
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/clocale
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cmath
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/codecvt
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/compare
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/complex
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/complex.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/condition_variable
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/csetjmp
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/csignal
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cstdarg
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cstdbool
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cstddef
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cstdint
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cstdio
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cstdlib
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cstring
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ctgmath
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ctime
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ctype.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cwchar
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/cwctype
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/deque
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/errno.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/exception
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/execution
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/__config
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/__memory
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/algorithm
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/coroutine
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/deque
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/filesystem
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/forward_list
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/functional
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/iterator
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/list
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/map
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/memory_resource
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/propagate_const
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/regex
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/set
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/simd
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/string
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/type_traits
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/unordered_map
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/unordered_set
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/utility
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/experimental/vector
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ext/__hash
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ext/hash_map
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ext/hash_set
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/fenv.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/filesystem
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/float.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/forward_list
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/fstream
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/functional
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/future
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/initializer_list
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/inttypes.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/iomanip
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ios
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/iosfwd
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/iostream
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/istream
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/iterator
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/latch
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/limits
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/limits.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/list
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/locale
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/locale.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/map
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/math.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/memory
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/module.modulemap
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/mutex
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/new
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/numeric
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/optional
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ostream
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/queue
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/random
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/ratio
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/regex
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/scoped_allocator
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/semaphore
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/set
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/setjmp.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/shared_mutex
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/span
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/sstream
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/stack
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/stdbool.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/stddef.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/stdexcept
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/stdint.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/stdio.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/stdlib.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/streambuf
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/string
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/string.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/string_view
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/strstream
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/android/locale_bionic.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/atomic/atomic_base.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/atomic/atomic_c11.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/atomic/atomic_cuda.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/atomic/atomic_cuda_derived.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/atomic/atomic_cuda_generated.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/atomic/atomic_gcc.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/atomic/atomic_msvc.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/atomic/atomic_nvrtc.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/atomic/atomic_scopes.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/atomic/cxx_atomic.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/fuchsia/xlocale.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/ibm/limits.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/ibm/locale_mgmt_aix.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/ibm/support.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/ibm/xlocale.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/musl/xlocale.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/newlib/xlocale.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/solaris/floatingpoint.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/solaris/wchar.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/solaris/xlocale.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/xlocale/__nop_locale_mgmt.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/xlocale/__posix_l_fallback.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/support/xlocale/__strtonum_fallback.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/system_error
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/tgmath.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/thread
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/tuple
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/type_traits
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/typeindex
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/typeinfo
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/unordered_map
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/unordered_set
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/utility
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/valarray
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/variant
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/vector
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/version
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/wchar.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/wctype.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/functional
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/initializer_list
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/iterator
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/latch
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/limits
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/ratio
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/semaphore
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/tuple
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/type_traits
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/utility
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/version
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/nv/detail/__preprocessor
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/nv/detail/__target_macros
 create mode 100644 thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/nv/target
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/addressof.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/adjacent_difference.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/advance.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/allocate_unique.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/async/copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/async/for_each.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/async/reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/async/scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/async/sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/async/transform.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/binary_search.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/complex.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/count.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/adjacent_difference.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/advance.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/algorithm_wrapper.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/alignment.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/allocator_traits.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/allocator_traits.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/copy_construct_range.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/copy_construct_range.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/default_construct_range.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/default_construct_range.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/destroy_range.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/destroy_range.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/fill_construct_range.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/fill_construct_range.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/malloc_allocator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/malloc_allocator.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/no_throw_allocator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/tagged_allocator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/tagged_allocator.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/temporary_allocator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator/temporary_allocator.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/allocator_aware_execution_policy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/binary_search.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/caching_allocator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/arithmetic.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/c99math.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/catrig.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/catrigf.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/ccosh.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/ccoshf.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/cexp.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/cexpf.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/clog.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/clogf.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/complex.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/cpow.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/cproj.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/csinh.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/csinhf.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/csqrt.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/csqrtf.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/ctanh.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/ctanhf.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/math_private.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/complex/stream.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/compiler.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/compiler_fence.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/config.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/cpp_compatibility.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/cpp_dialect.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/debug.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/deprecated.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/device_system.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/exec_check_disable.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/forceinline.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/global_workarounds.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/host_device.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/host_system.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/memory_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/namespace.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/config/simple_defines.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/contiguous_storage.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/contiguous_storage.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/copy.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/copy_if.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/copy_if.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/count.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/count.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/cpp11_required.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/cpp14_required.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/cstdint.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/dependencies_aware_execution_policy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/device_delete.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/device_free.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/device_malloc.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/device_new.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/device_ptr.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/distance.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/equal.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/event_error.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/execute_with_allocator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/execute_with_allocator_fwd.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/execute_with_dependencies.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/execution_policy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/extrema.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/fill.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/find.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/for_each.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/function.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/actor.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/actor.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/argument.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/composite.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/operators.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/operators/arithmetic_operators.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/operators/assignment_operator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/operators/bitwise_operators.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/operators/compound_assignment_operators.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/operators/logical_operators.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/operators/operator_adaptors.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/operators/relational_operators.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/placeholder.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/functional/value.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/gather.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/generate.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/get_iterator_value.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/inner_product.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/integer_math.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/integer_traits.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/internal_functional.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/logical.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/malloc_and_free.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/memory_algorithms.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/memory_wrapper.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/merge.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/minmax.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/mismatch.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/modern_gcc_required.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/mpl/math.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/numeric_traits.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/numeric_wrapper.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/overlapped_copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/pair.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/partition.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/pointer.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/pointer.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/preprocessor.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/range/head_flags.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/range/tail_flags.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/raw_pointer_cast.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/raw_reference_cast.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/reduce.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/reference.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/reference_forward_declaration.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/remove.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/replace.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/reverse.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/scan.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/scatter.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/select_system.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/seq.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/sequence.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/set_operations.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/shuffle.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/sort.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/static_assert.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/static_map.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/swap.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/swap.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/swap_ranges.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/tabulate.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/temporary_array.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/temporary_array.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/temporary_buffer.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/transform.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/transform_reduce.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/transform_scan.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/trivial_sequence.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/tuple.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/tuple_algorithms.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/tuple_meta_transform.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/tuple_transform.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_deduction.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/function_traits.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/has_member_function.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/has_nested_type.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/has_trivial_assign.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/is_call_possible.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/is_metafunction_defined.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/iterator/is_discard_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/iterator/is_output_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/minimum_type.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/pointer_traits.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/type_traits/result_of_adaptable_function.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/uninitialized_copy.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/uninitialized_fill.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/unique.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/use_default.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/util/align.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/vector_base.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/detail/vector_base.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_allocator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_delete.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_free.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_make_unique.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_malloc.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_malloc_allocator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_new.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_new_allocator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_ptr.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_reference.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/device_vector.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/distance.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/equal.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/event.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/execution_policy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/extrema.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/fill.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/find.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/for_each.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/functional.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/future.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/gather.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/generate.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/host_vector.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/inner_product.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/constant_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/counting_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/any_assign.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/any_system_tag.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/constant_iterator_base.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/counting_iterator.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/device_system_tag.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/discard_iterator_base.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/distance_from_result.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/host_system_tag.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/is_iterator_category.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/iterator_adaptor_base.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/iterator_category_to_system.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/iterator_category_to_traversal.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/iterator_category_with_system_and_traversal.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/iterator_facade_category.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/iterator_traits.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/iterator_traversal_tags.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/join_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/minimum_category.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/minimum_system.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/normal_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/permutation_iterator_base.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/retag.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/reverse_iterator.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/reverse_iterator_base.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/tagged_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/transform_input_output_iterator.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/transform_iterator.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/transform_output_iterator.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/tuple_of_iterator_references.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/universal_categories.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/zip_iterator.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/detail/zip_iterator_base.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/discard_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/iterator_adaptor.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/iterator_categories.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/iterator_facade.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/iterator_traits.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/permutation_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/retag.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/reverse_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/transform_input_output_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/transform_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/transform_output_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/iterator/zip_iterator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/limits.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/logical.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/memory.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/merge.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mismatch.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/allocator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/device_memory_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/disjoint_pool.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/disjoint_sync_pool.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/disjoint_tls_pool.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/fancy_pointer_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/host_memory_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/memory_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/new.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/polymorphic_adaptor.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/pool.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/pool_options.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/sync_pool.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/tls_pool.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/universal_memory_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/mr/validator.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/optional.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/pair.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/partition.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/per_device_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/random.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/remove.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/replace.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/reverse.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/scatter.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/sequence.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/set_operations.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/shuffle.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/swap.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/adjacent_difference.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/assign_value.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/binary_search.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/copy_if.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/count.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/equal.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/execution_policy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/extrema.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/fill.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/find.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/for_each.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/gather.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/generate.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/get_value.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/inner_product.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/iter_swap.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/logical.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/malloc_and_free.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/memory.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/merge.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/mismatch.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/par.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/partition.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/per_device_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/reduce_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/remove.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/replace.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/reverse.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/scan_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/scatter.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/sequence.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/set_operations.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/swap_ranges.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/tabulate.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/temporary_buffer.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/transform.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/transform_reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/transform_scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/uninitialized_copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/uninitialized_fill.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/unique.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/unique_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/detail/vector.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/execution_policy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/memory.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/memory_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/pointer.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/cpp/vector.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/adjacent_difference.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/assign_value.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/async/copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/async/for_each.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/async/reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/async/scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/async/sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/async/transform.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/binary_search.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/copy_if.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/count.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/equal.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/extrema.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/fill.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/find.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/for_each.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/gather.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/generate.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/get_value.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/inner_product.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/iter_swap.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/logical.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/malloc_and_free.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/merge.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/mismatch.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/partition.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/per_device_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/reduce_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/remove.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/replace.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/reverse.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/scan_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/scatter.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/sequence.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/set_operations.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/swap_ranges.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/tabulate.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/temporary_buffer.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/transform.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/transform_reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/transform_scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/uninitialized_copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/uninitialized_fill.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/unique.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/adl/unique_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/bad_alloc.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/errno.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/error_category.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/error_code.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/error_condition.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/adjacent_difference.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/adjacent_difference.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/advance.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/advance.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/binary_search.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/binary_search.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/copy.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/copy_if.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/copy_if.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/count.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/count.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/distance.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/distance.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/equal.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/equal.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/extrema.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/extrema.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/fill.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/find.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/find.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/for_each.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/gather.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/gather.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/generate.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/generate.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/inner_product.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/inner_product.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/logical.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/memory.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/memory.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/merge.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/merge.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/mismatch.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/mismatch.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/partition.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/partition.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/per_device_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/reduce.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/reduce_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/reduce_by_key.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/remove.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/remove.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/replace.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/replace.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/reverse.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/reverse.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/scalar/binary_search.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/scalar/binary_search.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/scan.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/scan_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/scan_by_key.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/scatter.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/scatter.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/select_system.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/select_system.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/select_system_exists.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/sequence.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/sequence.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/set_operations.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/set_operations.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/shuffle.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/shuffle.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/sort.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/swap_ranges.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/swap_ranges.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/tabulate.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/tabulate.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/tag.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/temporary_buffer.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/temporary_buffer.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/transform.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/transform.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/transform_reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/transform_reduce.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/transform_scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/transform_scan.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/uninitialized_copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/uninitialized_copy.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/uninitialized_fill.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/uninitialized_fill.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/unique.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/unique.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/unique_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/generic/unique_by_key.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/internal/decompose.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/adjacent_difference.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/assign_value.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/binary_search.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/copy.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/copy_backward.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/copy_if.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/count.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/equal.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/execution_policy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/extrema.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/fill.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/find.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/for_each.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/gather.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/general_copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/generate.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/get_value.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/inner_product.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/insertion_sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/iter_swap.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/logical.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/malloc_and_free.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/merge.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/merge.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/mismatch.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/partition.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/per_device_resource.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/reduce_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/remove.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/replace.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/reverse.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/scan_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/scatter.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/sequence.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/set_operations.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/sort.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/stable_merge_sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/stable_merge_sort.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/stable_primitive_sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/stable_primitive_sort.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/stable_radix_sort.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/stable_radix_sort.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/swap_ranges.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/tabulate.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/temporary_buffer.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/transform.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/transform_reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/transform_scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/trivial_copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/uninitialized_copy.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/uninitialized_fill.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/unique.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/sequential/unique_by_key.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/detail/system_error.inl
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/error_code.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system/system_error.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/system_error.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/tabulate.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/transform.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/transform_reduce.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/transform_scan.h
 create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/tuple.h
 create mode 100644
thirdparty/manifold/thirdparty/thrust/thrust/type_traits/integer_sequence.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/type_traits/is_contiguous_iterator.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/type_traits/is_execution_policy.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/type_traits/is_operator_less_or_greater_function_object.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/type_traits/is_operator_plus_function_object.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/type_traits/is_trivially_relocatable.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/type_traits/logical_metafunctions.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/type_traits/remove_cvref.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/type_traits/void_t.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/uninitialized_copy.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/uninitialized_fill.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/unique.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/universal_allocator.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/universal_ptr.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/universal_vector.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/version.h create mode 100644 thirdparty/manifold/thirdparty/thrust/thrust/zip_function.h diff --git a/modules/csg/SCsub b/modules/csg/SCsub index 1cf9974fc138..87ffb8e3fa8b 100644 --- a/modules/csg/SCsub +++ b/modules/csg/SCsub @@ -6,6 +6,65 @@ Import("env_modules") env_csg = env_modules.Clone() # Godot source files + +if env_csg["disable_exceptions"]: + # Enable exceptions for only manifold. 
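The disable_exceptions branch above (continued in the next hunk) re-enables C++ exceptions for this module only: manifold reports unrecoverable input errors by throwing, so compiling it with exceptions disabled would turn those throws into aborts. A minimal sketch of the calling pattern this enables, assuming only that manifold's errors derive from std::exception; the wrapper function and its fallback policy are illustrative, not part of the patch:

#include <exception>
#include <iostream>
#include "manifold.h"

// Hypothetical wrapper: evaluate a boolean difference but degrade gracefully
// instead of crashing the editor when manifold rejects the input mesh.
manifold::Manifold subtract_or_empty(const manifold::Manifold &a, const manifold::Manifold &b) {
    try {
        return a - b; // Boolean difference (manifold overloads operator-).
    } catch (const std::exception &e) {
        std::cerr << "CSG boolean failed: " << e.what() << "\n";
        return manifold::Manifold(); // Default-constructed manifold is empty.
    }
}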
+ if env_csg.msvc: + env_csg.Append(CPPDEFINES=[("_HAS_EXCEPTIONS", 1)], CCFLAGS=["/EHsc"]) + else: + env_csg.Append(CXXFLAGS=["-fexceptions"]) + + +thirdparty_dir = "#thirdparty/manifold/" +thirdparty_sources = [ + thirdparty_dir + file + for file in [ + "src/polygon/src/polygon.cpp", + "src/manifold/src/constructors.cpp", + "src/manifold/src/edge_op.cpp", + "src/manifold/src/face_op.cpp", + "src/manifold/src/impl.cpp", + "src/manifold/src/boolean_result.cpp", + "src/manifold/src/boolean3.cpp", + "src/manifold/src/manifold.cpp", + "src/manifold/src/properties.cpp", + "src/manifold/src/smoothing.cpp", + "src/manifold/src/sort.cpp", + "src/collider/src/collider.cpp", + "src/manifold/src/subdivision.cpp", + "src/manifold/src/csg_tree.cpp", + "src/cross_section/src/cross_section.cpp", + "src/third_party/quickhull/QuickHull.cpp", + ] +] + +env_csg.Append(CPPDEFINES=["THRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_CPP"]) +if env["platform"] == "web": + env_csg.Append(CPPDEFINES=["_LIBCUDACXX_HAS_THREAD_API_EXTERNAL", "_LIBCUDACXX_HAS_THREAD_API_CUDA"]) + env_csg.Append(CCFLAGS=["-sDISABLE_EXCEPTION_CATCHING=0"]) + env_csg.Append(LINKFLAGS=["-sERROR_ON_UNDEFINED_SYMBOLS=0"]) + +env_csg.Prepend( + CPPPATH=[ + thirdparty_dir + path + for path in [ + "src/third_party/quickhull", + "src/manifold/include", + "src/utilities/include", + "src/cross_section/include", + "src/polygon/include", + "src/collider/include", + "thirdparty/thrust/dependencies/libcudacxx/include", + "thirdparty/thrust", + "thirdparty/glm", + ] + ] +) + +env_thirdparty = env_csg.Clone() +env_thirdparty.disable_warnings() +env_thirdparty.add_source_files(env.modules_sources, thirdparty_sources) + env_csg.add_source_files(env.modules_sources, "*.cpp") if env.editor_build: env_csg.add_source_files(env.modules_sources, "editor/*.cpp") diff --git a/modules/csg/csg.cpp b/modules/csg/csg.cpp deleted file mode 100644 index a4a3c768e94f..000000000000 --- a/modules/csg/csg.cpp +++ /dev/null @@ -1,1547 +0,0 @@ -/**************************************************************************/ -/* csg.cpp */ -/**************************************************************************/ -/* This file is part of: */ -/* GODOT ENGINE */ -/* https://godotengine.org */ -/**************************************************************************/ -/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */ -/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */ -/* */ -/* Permission is hereby granted, free of charge, to any person obtaining */ -/* a copy of this software and associated documentation files (the */ -/* "Software"), to deal in the Software without restriction, including */ -/* without limitation the rights to use, copy, modify, merge, publish, */ -/* distribute, sublicense, and/or sell copies of the Software, and to */ -/* permit persons to whom the Software is furnished to do so, subject to */ -/* the following conditions: */ -/* */ -/* The above copyright notice and this permission notice shall be */ -/* included in all copies or substantial portions of the Software. */ -/* */ -/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ -/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ -/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
*/ -/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ -/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ -/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ -/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -/**************************************************************************/ - -#include "csg.h" - -#include "core/math/geometry_2d.h" -#include "core/math/math_funcs.h" -#include "core/templates/sort_array.h" - -// Static helper functions. - -inline static bool is_snapable(const Vector3 &p_point1, const Vector3 &p_point2, real_t p_distance) { - return p_point2.distance_squared_to(p_point1) < p_distance * p_distance; -} - -inline static Vector2 interpolate_segment_uv(const Vector2 p_segment_points[2], const Vector2 p_uvs[2], const Vector2 &p_interpolation_point) { - if (p_segment_points[0].is_equal_approx(p_segment_points[1])) { - return p_uvs[0]; - } - - float segment_length = p_segment_points[0].distance_to(p_segment_points[1]); - float distance = p_segment_points[0].distance_to(p_interpolation_point); - float fraction = distance / segment_length; - - return p_uvs[0].lerp(p_uvs[1], fraction); -} - -inline static Vector2 interpolate_triangle_uv(const Vector2 p_vertices[3], const Vector2 p_uvs[3], const Vector2 &p_interpolation_point) { - if (p_interpolation_point.is_equal_approx(p_vertices[0])) { - return p_uvs[0]; - } - if (p_interpolation_point.is_equal_approx(p_vertices[1])) { - return p_uvs[1]; - } - if (p_interpolation_point.is_equal_approx(p_vertices[2])) { - return p_uvs[2]; - } - - Vector2 edge1 = p_vertices[1] - p_vertices[0]; - Vector2 edge2 = p_vertices[2] - p_vertices[0]; - Vector2 interpolation = p_interpolation_point - p_vertices[0]; - - float edge1_on_edge1 = edge1.dot(edge1); - float edge1_on_edge2 = edge1.dot(edge2); - float edge2_on_edge2 = edge2.dot(edge2); - float inter_on_edge1 = interpolation.dot(edge1); - float inter_on_edge2 = interpolation.dot(edge2); - float scale = (edge1_on_edge1 * edge2_on_edge2 - edge1_on_edge2 * edge1_on_edge2); - if (scale == 0) { - return p_uvs[0]; - } - - float v = (edge2_on_edge2 * inter_on_edge1 - edge1_on_edge2 * inter_on_edge2) / scale; - float w = (edge1_on_edge1 * inter_on_edge2 - edge1_on_edge2 * inter_on_edge1) / scale; - float u = 1.0f - v - w; - - return p_uvs[0] * u + p_uvs[1] * v + p_uvs[2] * w; -} - -static inline bool ray_intersects_triangle(const Vector3 &p_from, const Vector3 &p_dir, const Vector3 p_vertices[3], float p_tolerance, Vector3 &r_intersection_point) { - Vector3 edge1 = p_vertices[1] - p_vertices[0]; - Vector3 edge2 = p_vertices[2] - p_vertices[0]; - Vector3 h = p_dir.cross(edge2); - real_t a = edge1.dot(h); - // Check if ray is parallel to triangle. - if (Math::is_zero_approx(a)) { - return false; - } - real_t f = 1.0 / a; - - Vector3 s = p_from - p_vertices[0]; - real_t u = f * s.dot(h); - if (u < 0.0 - p_tolerance || u > 1.0 + p_tolerance) { - return false; - } - - Vector3 q = s.cross(edge1); - real_t v = f * p_dir.dot(q); - if (v < 0.0 - p_tolerance || u + v > 1.0 + p_tolerance) { - return false; - } - - // Ray intersects triangle. - // Calculate distance. - real_t t = f * edge2.dot(q); - // Confirm triangle is in front of ray. 
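The ray_intersects_triangle helper being deleted here is a standard Möller–Trumbore test. A self-contained sketch with a concrete ray makes the math easy to check by hand; the minimal Vec3 below is an illustrative stand-in, not Godot's Vector3:

#include <cmath>
#include <cstdio>

struct Vec3 {
    double x, y, z;
    Vec3 operator-(const Vec3 &o) const { return {x - o.x, y - o.y, z - o.z}; }
    Vec3 operator+(const Vec3 &o) const { return {x + o.x, y + o.y, z + o.z}; }
    Vec3 operator*(double s) const { return {x * s, y * s, z * s}; }
    double dot(const Vec3 &o) const { return x * o.x + y * o.y + z * o.z; }
    Vec3 cross(const Vec3 &o) const {
        return {y * o.z - z * o.y, z * o.x - x * o.z, x * o.y - y * o.x};
    }
};

// Returns true and writes the hit point when the ray (from, dir) crosses the
// triangle v0-v1-v2 in front of the origin, mirroring the helper above.
bool ray_hits_triangle(const Vec3 &from, const Vec3 &dir, const Vec3 &v0,
                       const Vec3 &v1, const Vec3 &v2, Vec3 &hit) {
    const double eps = 1e-9;
    Vec3 e1 = v1 - v0, e2 = v2 - v0;
    Vec3 h = dir.cross(e2);
    double a = e1.dot(h);
    if (std::fabs(a) < eps) return false; // Ray parallel to the triangle plane.
    double f = 1.0 / a;
    Vec3 s = from - v0;
    double u = f * s.dot(h);
    if (u < -eps || u > 1.0 + eps) return false;
    Vec3 q = s.cross(e1);
    double v = f * dir.dot(q);
    if (v < -eps || u + v > 1.0 + eps) return false;
    double t = f * e2.dot(q); // Distance along the ray.
    if (t < eps) return false; // Triangle is behind the ray origin.
    hit = from + dir * t;
    return true;
}

int main() {
    Vec3 hit;
    // Unit triangle in the z=0 plane, ray shooting straight down from above.
    if (ray_hits_triangle({0.25, 0.25, 1}, {0, 0, -1}, {0, 0, 0}, {1, 0, 0}, {0, 1, 0}, hit)) {
        std::printf("hit at (%g, %g, %g)\n", hit.x, hit.y, hit.z); // (0.25, 0.25, 0)
    }
}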
- if (t >= p_tolerance) { - r_intersection_point = p_from + p_dir * t; - return true; - } else { - return false; - } -} - -inline bool is_point_in_triangle(const Vector3 &p_point, const Vector3 p_vertices[3], int p_shifted = 0) { - real_t det = p_vertices[0].dot(p_vertices[1].cross(p_vertices[2])); - - // If determinant is zero, try to shift the triangle and the point. - if (Math::is_zero_approx(det)) { - if (p_shifted > 2) { - // Triangle appears degenerate, so ignore it. - return false; - } - Vector3 shift_by; - shift_by[p_shifted] = 1; - Vector3 shifted_point = p_point + shift_by; - Vector3 shifted_vertices[3] = { p_vertices[0] + shift_by, p_vertices[1] + shift_by, p_vertices[2] + shift_by }; - return is_point_in_triangle(shifted_point, shifted_vertices, p_shifted + 1); - } - - // Find the barycentric coordinates of the point with respect to the vertices. - real_t lambda[3]; - lambda[0] = p_vertices[1].cross(p_vertices[2]).dot(p_point) / det; - lambda[1] = p_vertices[2].cross(p_vertices[0]).dot(p_point) / det; - lambda[2] = p_vertices[0].cross(p_vertices[1]).dot(p_point) / det; - - // Point is in the plane if all lambdas sum to 1. - if (!Math::is_equal_approx(lambda[0] + lambda[1] + lambda[2], 1)) { - return false; - } - - // Point is inside the triangle if all lambdas are positive. - if (lambda[0] < 0 || lambda[1] < 0 || lambda[2] < 0) { - return false; - } - - return true; -} - -inline static bool is_triangle_degenerate(const Vector2 p_vertices[3], real_t p_vertex_snap2) { - real_t det = p_vertices[0].x * p_vertices[1].y - p_vertices[0].x * p_vertices[2].y + - p_vertices[0].y * p_vertices[2].x - p_vertices[0].y * p_vertices[1].x + - p_vertices[1].x * p_vertices[2].y - p_vertices[1].y * p_vertices[2].x; - - return det < p_vertex_snap2; -} - -inline static bool are_segments_parallel(const Vector2 p_segment1_points[2], const Vector2 p_segment2_points[2], float p_vertex_snap2) { - Vector2 segment1 = p_segment1_points[1] - p_segment1_points[0]; - Vector2 segment2 = p_segment2_points[1] - p_segment2_points[0]; - real_t segment1_length2 = segment1.dot(segment1); - real_t segment2_length2 = segment2.dot(segment2); - real_t segment_onto_segment = segment2.dot(segment1); - - if (segment1_length2 < p_vertex_snap2 || segment2_length2 < p_vertex_snap2) { - return true; - } - - real_t max_separation2; - if (segment1_length2 > segment2_length2) { - max_separation2 = segment2_length2 - segment_onto_segment * segment_onto_segment / segment1_length2; - } else { - max_separation2 = segment1_length2 - segment_onto_segment * segment_onto_segment / segment2_length2; - } - - return max_separation2 < p_vertex_snap2; -} - -// CSGBrush - -void CSGBrush::_regen_face_aabbs() { - for (int i = 0; i < faces.size(); i++) { - faces.write[i].aabb = AABB(); - faces.write[i].aabb.position = faces[i].vertices[0]; - faces.write[i].aabb.expand_to(faces[i].vertices[1]); - faces.write[i].aabb.expand_to(faces[i].vertices[2]); - } -} - -void CSGBrush::build_from_faces(const Vector &p_vertices, const Vector &p_uvs, const Vector &p_smooth, const Vector> &p_materials, const Vector &p_flip_faces) { - faces.clear(); - - int vc = p_vertices.size(); - - ERR_FAIL_COND((vc % 3) != 0); - - const Vector3 *rv = p_vertices.ptr(); - int uvc = p_uvs.size(); - const Vector2 *ruv = p_uvs.ptr(); - int sc = p_smooth.size(); - const bool *rs = p_smooth.ptr(); - int mc = p_materials.size(); - const Ref *rm = p_materials.ptr(); - int ic = p_flip_faces.size(); - const bool *ri = p_flip_faces.ptr(); - - HashMap, int> material_map; -
faces.resize(p_vertices.size() / 3); - - for (int i = 0; i < faces.size(); i++) { - Face &f = faces.write[i]; - f.vertices[0] = rv[i * 3 + 0]; - f.vertices[1] = rv[i * 3 + 1]; - f.vertices[2] = rv[i * 3 + 2]; - - if (uvc == vc) { - f.uvs[0] = ruv[i * 3 + 0]; - f.uvs[1] = ruv[i * 3 + 1]; - f.uvs[2] = ruv[i * 3 + 2]; - } - - if (sc == vc / 3) { - f.smooth = rs[i]; - } else { - f.smooth = false; - } - - if (ic == vc / 3) { - f.invert = ri[i]; - } else { - f.invert = false; - } - - if (mc == vc / 3) { - Ref mat = rm[i]; - if (mat.is_valid()) { - HashMap, int>::ConstIterator E = material_map.find(mat); - - if (E) { - f.material = E->value; - } else { - f.material = material_map.size(); - material_map[mat] = f.material; - } - - } else { - f.material = -1; - } - } - } - - materials.resize(material_map.size()); - for (const KeyValue, int> &E : material_map) { - materials.write[E.value] = E.key; - } - - _regen_face_aabbs(); -} - -void CSGBrush::copy_from(const CSGBrush &p_brush, const Transform3D &p_xform) { - faces = p_brush.faces; - materials = p_brush.materials; - - for (int i = 0; i < faces.size(); i++) { - for (int j = 0; j < 3; j++) { - faces.write[i].vertices[j] = p_xform.xform(p_brush.faces[i].vertices[j]); - } - } - - _regen_face_aabbs(); -} - -// CSGBrushOperation - -void CSGBrushOperation::merge_brushes(Operation p_operation, const CSGBrush &p_brush_a, const CSGBrush &p_brush_b, CSGBrush &r_merged_brush, float p_vertex_snap) { - // Check for face collisions and add necessary faces. - Build2DFaceCollection build2DFaceCollection; - for (int i = 0; i < p_brush_a.faces.size(); i++) { - for (int j = 0; j < p_brush_b.faces.size(); j++) { - if (p_brush_a.faces[i].aabb.intersects_inclusive(p_brush_b.faces[j].aabb)) { - update_faces(p_brush_a, i, p_brush_b, j, build2DFaceCollection, p_vertex_snap); - } - } - } - - // Add faces to MeshMerge. - MeshMerge mesh_merge; - mesh_merge.vertex_snap = p_vertex_snap; - - for (int i = 0; i < p_brush_a.faces.size(); i++) { - Ref material; - if (p_brush_a.faces[i].material != -1) { - material = p_brush_a.materials[p_brush_a.faces[i].material]; - } - - if (build2DFaceCollection.build2DFacesA.has(i)) { - build2DFaceCollection.build2DFacesA[i].addFacesToMesh(mesh_merge, p_brush_a.faces[i].smooth, p_brush_a.faces[i].invert, material, false); - } else { - Vector3 points[3]; - Vector2 uvs[3]; - for (int j = 0; j < 3; j++) { - points[j] = p_brush_a.faces[i].vertices[j]; - uvs[j] = p_brush_a.faces[i].uvs[j]; - } - mesh_merge.add_face(points, uvs, p_brush_a.faces[i].smooth, p_brush_a.faces[i].invert, material, false); - } - } - - for (int i = 0; i < p_brush_b.faces.size(); i++) { - Ref material; - if (p_brush_b.faces[i].material != -1) { - material = p_brush_b.materials[p_brush_b.faces[i].material]; - } - - if (build2DFaceCollection.build2DFacesB.has(i)) { - build2DFaceCollection.build2DFacesB[i].addFacesToMesh(mesh_merge, p_brush_b.faces[i].smooth, p_brush_b.faces[i].invert, material, true); - } else { - Vector3 points[3]; - Vector2 uvs[3]; - for (int j = 0; j < 3; j++) { - points[j] = p_brush_b.faces[i].vertices[j]; - uvs[j] = p_brush_b.faces[i].uvs[j]; - } - mesh_merge.add_face(points, uvs, p_brush_b.faces[i].smooth, p_brush_b.faces[i].invert, material, true); - } - } - - // Mark faces that ended up inside the intersection. - mesh_merge.mark_inside_faces(); - - // Create new brush and fill with new faces. 
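merge_brushes above takes two input brushes and an output brush. A sketch of a minimal caller, using the signatures from the csg.h removed later in this patch; the vertex data and the 0.001 snap (which matches CSGShape3D's default) are illustrative:

// Illustrative driver: subtract brush B from brush A using the removed API.
void subtract_brushes(const Vector<Vector3> &verts_a, const Vector<Vector3> &verts_b) {
    CSGBrush brush_a, brush_b, result;
    brush_a.build_from_faces(verts_a, Vector<Vector2>(), Vector<bool>(), Vector<Ref<Material>>(), Vector<bool>());
    brush_b.build_from_faces(verts_b, Vector<Vector2>(), Vector<bool>(), Vector<Ref<Material>>(), Vector<bool>());

    CSGBrushOperation op;
    op.merge_brushes(CSGBrushOperation::OPERATION_SUBTRACTION, brush_a, brush_b, result, 0.001f);
    // result now holds only the faces kept by the subtraction case below.
}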
- r_merged_brush.faces.clear(); - - switch (p_operation) { - case OPERATION_UNION: { - int outside_count = 0; - - for (int i = 0; i < mesh_merge.faces.size(); i++) { - if (mesh_merge.faces[i].inside) { - continue; - } - outside_count++; - } - - r_merged_brush.faces.resize(outside_count); - - outside_count = 0; - - for (int i = 0; i < mesh_merge.faces.size(); i++) { - if (mesh_merge.faces[i].inside) { - continue; - } - - for (int j = 0; j < 3; j++) { - r_merged_brush.faces.write[outside_count].vertices[j] = mesh_merge.points[mesh_merge.faces[i].points[j]]; - r_merged_brush.faces.write[outside_count].uvs[j] = mesh_merge.faces[i].uvs[j]; - } - - r_merged_brush.faces.write[outside_count].smooth = mesh_merge.faces[i].smooth; - r_merged_brush.faces.write[outside_count].invert = mesh_merge.faces[i].invert; - r_merged_brush.faces.write[outside_count].material = mesh_merge.faces[i].material_idx; - outside_count++; - } - - r_merged_brush._regen_face_aabbs(); - - } break; - - case OPERATION_INTERSECTION: { - int inside_count = 0; - - for (int i = 0; i < mesh_merge.faces.size(); i++) { - if (!mesh_merge.faces[i].inside) { - continue; - } - inside_count++; - } - - r_merged_brush.faces.resize(inside_count); - - inside_count = 0; - - for (int i = 0; i < mesh_merge.faces.size(); i++) { - if (!mesh_merge.faces[i].inside) { - continue; - } - - for (int j = 0; j < 3; j++) { - r_merged_brush.faces.write[inside_count].vertices[j] = mesh_merge.points[mesh_merge.faces[i].points[j]]; - r_merged_brush.faces.write[inside_count].uvs[j] = mesh_merge.faces[i].uvs[j]; - } - - r_merged_brush.faces.write[inside_count].smooth = mesh_merge.faces[i].smooth; - r_merged_brush.faces.write[inside_count].invert = mesh_merge.faces[i].invert; - r_merged_brush.faces.write[inside_count].material = mesh_merge.faces[i].material_idx; - inside_count++; - } - - r_merged_brush._regen_face_aabbs(); - - } break; - - case OPERATION_SUBTRACTION: { - int face_count = 0; - - for (int i = 0; i < mesh_merge.faces.size(); i++) { - if (mesh_merge.faces[i].from_b && !mesh_merge.faces[i].inside) { - continue; - } - if (!mesh_merge.faces[i].from_b && mesh_merge.faces[i].inside) { - continue; - } - face_count++; - } - - r_merged_brush.faces.resize(face_count); - - face_count = 0; - - for (int i = 0; i < mesh_merge.faces.size(); i++) { - if (mesh_merge.faces[i].from_b && !mesh_merge.faces[i].inside) { - continue; - } - if (!mesh_merge.faces[i].from_b && mesh_merge.faces[i].inside) { - continue; - } - - for (int j = 0; j < 3; j++) { - r_merged_brush.faces.write[face_count].vertices[j] = mesh_merge.points[mesh_merge.faces[i].points[j]]; - r_merged_brush.faces.write[face_count].uvs[j] = mesh_merge.faces[i].uvs[j]; - } - - if (mesh_merge.faces[i].from_b) { - //invert facing of insides of B - SWAP(r_merged_brush.faces.write[face_count].vertices[1], r_merged_brush.faces.write[face_count].vertices[2]); - SWAP(r_merged_brush.faces.write[face_count].uvs[1], r_merged_brush.faces.write[face_count].uvs[2]); - } - - r_merged_brush.faces.write[face_count].smooth = mesh_merge.faces[i].smooth; - r_merged_brush.faces.write[face_count].invert = mesh_merge.faces[i].invert; - r_merged_brush.faces.write[face_count].material = mesh_merge.faces[i].material_idx; - face_count++; - } - - r_merged_brush._regen_face_aabbs(); - - } break; - } - - // Update the list of materials. 
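The operation switch above is exactly the logic this patch delegates to manifold, whose CSG tree evaluates the same three set operations. A hedged sketch of the mapping, assuming manifold's operator overloads for union, intersection, and difference:

#include "manifold.h"

// Assumed mapping from the removed Operation enum onto manifold booleans.
manifold::Manifold apply_operation(CSGBrushOperation::Operation op,
                                   const manifold::Manifold &a, const manifold::Manifold &b) {
    switch (op) {
        case CSGBrushOperation::OPERATION_UNION:
            return a + b;
        case CSGBrushOperation::OPERATION_INTERSECTION:
            return a ^ b;
        case CSGBrushOperation::OPERATION_SUBTRACTION:
        default:
            return a - b;
    }
}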
- r_merged_brush.materials.resize(mesh_merge.materials.size()); - for (const KeyValue, int> &E : mesh_merge.materials) { - r_merged_brush.materials.write[E.value] = E.key; - } -} - -// CSGBrushOperation::MeshMerge - -// Use a limit to speed up bvh and limit the depth. -#define BVH_LIMIT 8 - -int CSGBrushOperation::MeshMerge::_create_bvh(FaceBVH *r_facebvhptr, FaceBVH **r_facebvhptrptr, int p_from, int p_size, int p_depth, int &r_max_depth, int &r_max_alloc) { - if (p_depth > r_max_depth) { - r_max_depth = p_depth; - } - - if (p_size == 0) { - return -1; - } - - if (p_size <= BVH_LIMIT) { - for (int i = 0; i < p_size - 1; i++) { - r_facebvhptrptr[p_from + i]->next = r_facebvhptrptr[p_from + i + 1] - r_facebvhptr; - } - return r_facebvhptrptr[p_from] - r_facebvhptr; - } - - AABB aabb; - aabb = r_facebvhptrptr[p_from]->aabb; - for (int i = 1; i < p_size; i++) { - aabb.merge_with(r_facebvhptrptr[p_from + i]->aabb); - } - - int li = aabb.get_longest_axis_index(); - - switch (li) { - case Vector3::AXIS_X: { - SortArray sort_x; - sort_x.nth_element(0, p_size, p_size / 2, &r_facebvhptrptr[p_from]); - //sort_x.sort(&p_bb[p_from],p_size); - } break; - - case Vector3::AXIS_Y: { - SortArray sort_y; - sort_y.nth_element(0, p_size, p_size / 2, &r_facebvhptrptr[p_from]); - //sort_y.sort(&p_bb[p_from],p_size); - } break; - - case Vector3::AXIS_Z: { - SortArray sort_z; - sort_z.nth_element(0, p_size, p_size / 2, &r_facebvhptrptr[p_from]); - //sort_z.sort(&p_bb[p_from],p_size); - } break; - } - - int left = _create_bvh(r_facebvhptr, r_facebvhptrptr, p_from, p_size / 2, p_depth + 1, r_max_depth, r_max_alloc); - int right = _create_bvh(r_facebvhptr, r_facebvhptrptr, p_from + p_size / 2, p_size - p_size / 2, p_depth + 1, r_max_depth, r_max_alloc); - - int index = r_max_alloc++; - FaceBVH *_new = &r_facebvhptr[index]; - _new->aabb = aabb; - _new->center = aabb.get_center(); - _new->face = -1; - _new->left = left; - _new->right = right; - _new->next = -1; - - return index; -} - -void CSGBrushOperation::MeshMerge::_add_distance(List &r_intersectionsA, List &r_intersectionsB, bool p_from_B, real_t p_distance_squared, bool p_is_conormal) const { - List &intersections = p_from_B ? r_intersectionsB : r_intersectionsA; - - // Check if distance exists. 
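_create_bvh above builds the tree top-down: leaves hold up to BVH_LIMIT faces, and interior nodes split the face list at the median of the bounding box's longest axis using nth_element (an O(n) partial sort). The same strategy in miniature, with illustrative types:

#include <algorithm>
#include <array>
#include <vector>

struct Bounds { float lo[3], hi[3]; };

static int longest_axis(const Bounds &b) {
    float e[3] = { b.hi[0] - b.lo[0], b.hi[1] - b.lo[1], b.hi[2] - b.lo[2] };
    if (e[0] >= e[1] && e[0] >= e[2]) return 0;
    return e[1] >= e[2] ? 1 : 2;
}

// Recursively order face centers so each subrange holds one subtree's faces.
void median_split(std::vector<std::array<float, 3>> &centers, int from, int count, const Bounds &bounds) {
    if (count <= 8) return; // Same leaf cutoff as BVH_LIMIT above.
    const int axis = longest_axis(bounds);
    std::nth_element(centers.begin() + from,
                     centers.begin() + from + count / 2,
                     centers.begin() + from + count,
                     [axis](const std::array<float, 3> &a, const std::array<float, 3> &b) {
                         return a[axis] < b[axis];
                     });
    // A real build recomputes each child's bounds before recursing; elided here.
    median_split(centers, from, count / 2, bounds);
    median_split(centers, from + count / 2, count - count / 2, bounds);
}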
- for (const IntersectionDistance E : intersections) { - if (E.is_conormal == p_is_conormal && Math::is_equal_approx(E.distance_squared, p_distance_squared)) { - return; - } - } - IntersectionDistance distance; - distance.is_conormal = p_is_conormal; - distance.distance_squared = p_distance_squared; - intersections.push_back(distance); -} - -bool CSGBrushOperation::MeshMerge::_bvh_inside(FaceBVH *r_facebvhptr, int p_max_depth, int p_bvh_first, int p_face_idx) const { - Face face = faces[p_face_idx]; - Vector3 face_points[3] = { - points[face.points[0]], - points[face.points[1]], - points[face.points[2]] - }; - Vector3 face_center = (face_points[0] + face_points[1] + face_points[2]) / 3.0; - Vector3 face_normal = Plane(face_points[0], face_points[1], face_points[2]).normal; - - uint32_t *stack = (uint32_t *)alloca(sizeof(int) * p_max_depth); - - enum { - TEST_AABB_BIT = 0, - VISIT_LEFT_BIT = 1, - VISIT_RIGHT_BIT = 2, - VISIT_DONE_BIT = 3, - VISITED_BIT_SHIFT = 29, - NODE_IDX_MASK = (1 << VISITED_BIT_SHIFT) - 1, - VISITED_BIT_MASK = ~NODE_IDX_MASK - }; - - List intersectionsA; - List intersectionsB; - - Intersection closest_intersection; - closest_intersection.found = false; - - int level = 0; - int pos = p_bvh_first; - stack[0] = pos; - - while (true) { - uint32_t node = stack[level] & NODE_IDX_MASK; - const FaceBVH *current_facebvhptr = &(r_facebvhptr[node]); - bool done = false; - - switch (stack[level] >> VISITED_BIT_SHIFT) { - case TEST_AABB_BIT: { - if (current_facebvhptr->face >= 0) { - while (current_facebvhptr) { - if (p_face_idx != current_facebvhptr->face && - current_facebvhptr->aabb.intersects_ray(face_center, face_normal)) { - const Face &current_face = faces[current_facebvhptr->face]; - Vector3 current_points[3] = { - points[current_face.points[0]], - points[current_face.points[1]], - points[current_face.points[2]] - }; - Vector3 current_normal = Plane(current_points[0], current_points[1], current_points[2]).normal; - Vector3 intersection_point; - // Check if faces are co-planar. - if (current_normal.is_equal_approx(face_normal) && - is_point_in_triangle(face_center, current_points)) { - // Only add an intersection if not a B face. - if (!face.from_b) { - _add_distance(intersectionsA, intersectionsB, current_face.from_b, 0, true); - } - } else if (ray_intersects_triangle(face_center, face_normal, current_points, CMP_EPSILON, intersection_point)) { - real_t distance_squared = face_center.distance_squared_to(intersection_point); - real_t inner = current_normal.dot(face_normal); - // If the faces are perpendicular, ignore this face. - // The triangles on the side should be intersected and result in the correct behavior. - if (!Math::is_zero_approx(inner)) { - _add_distance(intersectionsA, intersectionsB, current_face.from_b, distance_squared, inner > 0.0f); - } - } - - if (face.from_b != current_face.from_b) { - if (current_normal.is_equal_approx(face_normal) && - is_point_in_triangle(face_center, current_points)) { - // Only add an intersection if not a B face.
- if (!face.from_b) { - closest_intersection.found = true; - closest_intersection.conormal = 1.0f; - closest_intersection.distance_squared = 0.0f; - closest_intersection.origin_angle = -FLT_MAX; - } - } else if (ray_intersects_triangle(face_center, face_normal, current_points, CMP_EPSILON, intersection_point)) { - Intersection potential_intersection; - potential_intersection.found = true; - potential_intersection.conormal = face_normal.dot(current_normal); - potential_intersection.distance_squared = face_center.distance_squared_to(intersection_point); - potential_intersection.origin_angle = Math::abs(potential_intersection.conormal); - real_t intersection_dist_from_face = face_normal.dot(intersection_point - face_center); - for (int i = 0; i < 3; i++) { - real_t point_dist_from_face = face_normal.dot(current_points[i] - face_center); - if (!Math::is_equal_approx(point_dist_from_face, intersection_dist_from_face) && - point_dist_from_face < intersection_dist_from_face) { - potential_intersection.origin_angle = -potential_intersection.origin_angle; - break; - } - } - if (potential_intersection.conormal != 0.0f) { - if (!closest_intersection.found) { - closest_intersection = potential_intersection; - } else if (!Math::is_equal_approx(potential_intersection.distance_squared, closest_intersection.distance_squared) && - potential_intersection.distance_squared < closest_intersection.distance_squared) { - closest_intersection = potential_intersection; - } else if (Math::is_equal_approx(potential_intersection.distance_squared, closest_intersection.distance_squared)) { - if (potential_intersection.origin_angle < closest_intersection.origin_angle) { - closest_intersection = potential_intersection; - } - } - } - } - } - } - - if (current_facebvhptr->next != -1) { - current_facebvhptr = &r_facebvhptr[current_facebvhptr->next]; - } else { - current_facebvhptr = nullptr; - } - } - - stack[level] = (VISIT_DONE_BIT << VISITED_BIT_SHIFT) | node; - - } else { - bool valid = current_facebvhptr->aabb.intersects_ray(face_center, face_normal); - - if (!valid) { - stack[level] = (VISIT_DONE_BIT << VISITED_BIT_SHIFT) | node; - } else { - stack[level] = (VISIT_LEFT_BIT << VISITED_BIT_SHIFT) | node; - } - } - continue; - } - - case VISIT_LEFT_BIT: { - stack[level] = (VISIT_RIGHT_BIT << VISITED_BIT_SHIFT) | node; - stack[level + 1] = current_facebvhptr->left | TEST_AABB_BIT; - level++; - continue; - } - - case VISIT_RIGHT_BIT: { - stack[level] = (VISIT_DONE_BIT << VISITED_BIT_SHIFT) | node; - stack[level + 1] = current_facebvhptr->right | TEST_AABB_BIT; - level++; - continue; - } - - case VISIT_DONE_BIT: { - if (level == 0) { - done = true; - break; - } else { - level--; - } - continue; - } - } - - if (done) { - break; - } - } - - if (!closest_intersection.found) { - return false; - } else { - return closest_intersection.conormal > 0.0f; - } -} - -void CSGBrushOperation::MeshMerge::mark_inside_faces() { - // Mark faces that are inside. This helps later do the boolean ops when merging. - // This approach is very brute force with a bunch of optimizations, - // such as BVH and pre AABB intersection test. 
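For contrast with the classification above, the textbook version of this inside test is ray-crossing parity: shoot one ray and count surface hits. The removed code instead keeps only the nearest hit and uses the sign of its conormal, which copes better with coplanar faces between the two brushes. A simplified parity sketch, reusing Vec3 and ray_hits_triangle from the Möller–Trumbore sketch earlier; it ignores grazing and coplanar cases:

#include <vector>

struct Tri { Vec3 v[3]; };

// Even-odd test: a point is inside a closed mesh when a ray from it
// crosses the surface an odd number of times.
bool point_inside_mesh(const Vec3 &p, const std::vector<Tri> &mesh) {
    const Vec3 dir = {0.577, 0.577, 0.577}; // Arbitrary, axis-avoiding direction.
    int crossings = 0;
    Vec3 hit;
    for (const Tri &t : mesh) {
        if (ray_hits_triangle(p, dir, t.v[0], t.v[1], t.v[2], hit)) {
            crossings++;
        }
    }
    return (crossings & 1) == 1;
}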
- - Vector bvhvec; - bvhvec.resize(faces.size() * 3); // Will never be larger than this (TODO: Make better) - FaceBVH *facebvh = bvhvec.ptrw(); - - AABB aabb_a; - AABB aabb_b; - - bool first_a = true; - bool first_b = true; - - for (int i = 0; i < faces.size(); i++) { - facebvh[i].left = -1; - facebvh[i].right = -1; - facebvh[i].face = i; - facebvh[i].aabb.position = points[faces[i].points[0]]; - facebvh[i].aabb.expand_to(points[faces[i].points[1]]); - facebvh[i].aabb.expand_to(points[faces[i].points[2]]); - facebvh[i].center = facebvh[i].aabb.get_center(); - facebvh[i].aabb.grow_by(vertex_snap); - facebvh[i].next = -1; - - if (faces[i].from_b) { - if (first_b) { - aabb_b = facebvh[i].aabb; - first_b = false; - } else { - aabb_b.merge_with(facebvh[i].aabb); - } - } else { - if (first_a) { - aabb_a = facebvh[i].aabb; - first_a = false; - } else { - aabb_a.merge_with(facebvh[i].aabb); - } - } - } - - AABB intersection_aabb = aabb_a.intersection(aabb_b); - - // Check if shape AABBs intersect. - if (intersection_aabb.size == Vector3()) { - return; - } - - Vector bvhtrvec; - bvhtrvec.resize(faces.size()); - FaceBVH **bvhptr = bvhtrvec.ptrw(); - for (int i = 0; i < faces.size(); i++) { - bvhptr[i] = &facebvh[i]; - } - - int max_depth = 0; - int max_alloc = faces.size(); - _create_bvh(facebvh, bvhptr, 0, faces.size(), 1, max_depth, max_alloc); - - for (int i = 0; i < faces.size(); i++) { - // Check if face AABB intersects the intersection AABB. - if (!intersection_aabb.intersects_inclusive(facebvh[i].aabb)) { - continue; - } - - if (_bvh_inside(facebvh, max_depth, max_alloc - 1, i)) { - faces.write[i].inside = true; - } - } -} - -void CSGBrushOperation::MeshMerge::add_face(const Vector3 p_points[3], const Vector2 p_uvs[3], bool p_smooth, bool p_invert, const Ref &p_material, bool p_from_b) { - int indices[3]; - for (int i = 0; i < 3; i++) { - VertexKey vk; - vk.x = int((double(p_points[i].x) + double(vertex_snap) * 0.31234) / double(vertex_snap)); - vk.y = int((double(p_points[i].y) + double(vertex_snap) * 0.31234) / double(vertex_snap)); - vk.z = int((double(p_points[i].z) + double(vertex_snap) * 0.31234) / double(vertex_snap)); - - int res; - if (snap_cache.lookup(vk, res)) { - indices[i] = res; - } else { - indices[i] = points.size(); - points.push_back(p_points[i]); - snap_cache.set(vk, indices[i]); - } - } - - // Don't add degenerate faces. - if (indices[0] == indices[2] || indices[0] == indices[1] || indices[1] == indices[2]) { - return; - } - - MeshMerge::Face face; - face.from_b = p_from_b; - face.inside = false; - face.smooth = p_smooth; - face.invert = p_invert; - - if (p_material.is_valid()) { - if (!materials.has(p_material)) { - face.material_idx = materials.size(); - materials[p_material] = face.material_idx; - } else { - face.material_idx = materials[p_material]; - } - } else { - face.material_idx = -1; - } - - for (int k = 0; k < 3; k++) { - face.points[k] = indices[k]; - face.uvs[k] = p_uvs[k]; - } - - faces.push_back(face); -} - -// CSGBrushOperation::Build2DFaces - -int CSGBrushOperation::Build2DFaces::_get_point_idx(const Vector2 &p_point) { - for (int vertex_idx = 0; vertex_idx < vertices.size(); ++vertex_idx) { - if (vertices[vertex_idx].point.distance_squared_to(p_point) < vertex_snap2) { - return vertex_idx; - } - } - return -1; -} - -int CSGBrushOperation::Build2DFaces::_add_vertex(const Vertex2D &p_vertex) { - // Check if vertex exists. 
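add_face above welds nearly-coincident vertices by quantizing each coordinate to a snap-sized grid cell; the 0.31234 factor looks like an arbitrary fractional shift that keeps vertices at round snap multiples off the cell boundaries. The same keying in miniature, using a standard hash map where the original uses OAHashMap:

#include <cstdint>
#include <unordered_map>

struct SnapKey {
    int64_t x, y, z;
    bool operator==(const SnapKey &k) const { return x == k.x && y == k.y && z == k.z; }
};
struct SnapKeyHash {
    size_t operator()(const SnapKey &k) const {
        size_t h = std::hash<int64_t>()(k.x);
        h = h * 31 + std::hash<int64_t>()(k.y);
        return h * 31 + std::hash<int64_t>()(k.z);
    }
};

// Quantize a position to its snap-grid cell, with the same fractional offset.
SnapKey make_key(double x, double y, double z, double snap) {
    return { int64_t((x + snap * 0.31234) / snap),
             int64_t((y + snap * 0.31234) / snap),
             int64_t((z + snap * 0.31234) / snap) };
}

// Returns the canonical index for a (possibly duplicated) vertex position.
int weld_vertex(std::unordered_map<SnapKey, int, SnapKeyHash> &cache, int &next_index, const SnapKey &key) {
    auto it = cache.find(key);
    if (it != cache.end()) return it->second;
    cache.emplace(key, next_index);
    return next_index++;
}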
- int vertex_id = _get_point_idx(p_vertex.point); - if (vertex_id != -1) { - return vertex_id; - } - - vertices.push_back(p_vertex); - return vertices.size() - 1; -} - -void CSGBrushOperation::Build2DFaces::_add_vertex_idx_sorted(Vector &r_vertex_indices, int p_new_vertex_index) { - if (p_new_vertex_index >= 0 && !r_vertex_indices.has(p_new_vertex_index)) { - ERR_FAIL_COND_MSG(p_new_vertex_index >= vertices.size(), "Invalid vertex index."); - - // The first vertex. - if (r_vertex_indices.size() == 0) { - // Simply add it. - r_vertex_indices.push_back(p_new_vertex_index); - return; - } - - // The second vertex. - if (r_vertex_indices.size() == 1) { - Vector2 first_point = vertices[r_vertex_indices[0]].point; - Vector2 new_point = vertices[p_new_vertex_index].point; - - // Sort along the axis with the greatest difference. - int axis = 0; - if (Math::abs(new_point.x - first_point.x) < Math::abs(new_point.y - first_point.y)) { - axis = 1; - } - - // Add it to the beginning or the end appropriately. - if (new_point[axis] < first_point[axis]) { - r_vertex_indices.insert(0, p_new_vertex_index); - } else { - r_vertex_indices.push_back(p_new_vertex_index); - } - - return; - } - - // Third or later vertices. - Vector2 first_point = vertices[r_vertex_indices[0]].point; - Vector2 last_point = vertices[r_vertex_indices[r_vertex_indices.size() - 1]].point; - Vector2 new_point = vertices[p_new_vertex_index].point; - - // Determine axis being sorted against i.e. the axis with the greatest difference. - int axis = 0; - if (Math::abs(last_point.x - first_point.x) < Math::abs(last_point.y - first_point.y)) { - axis = 1; - } - - // Insert the point at the appropriate index. - for (int insert_idx = 0; insert_idx < r_vertex_indices.size(); ++insert_idx) { - Vector2 insert_point = vertices[r_vertex_indices[insert_idx]].point; - if (new_point[axis] < insert_point[axis]) { - r_vertex_indices.insert(insert_idx, p_new_vertex_index); - return; - } - } - - // New largest, add it to the end. - r_vertex_indices.push_back(p_new_vertex_index); - } -} - -void CSGBrushOperation::Build2DFaces::_merge_faces(const Vector &p_segment_indices) { - int segments = p_segment_indices.size() - 1; - if (segments < 2) { - return; - } - - // Faces around an inner vertex are merged by moving the inner vertex to the first vertex. - for (int sorted_idx = 1; sorted_idx < segments; ++sorted_idx) { - int closest_idx = 0; - int inner_idx = p_segment_indices[sorted_idx]; - - if (sorted_idx > segments / 2) { - // Merge to other segment end. - closest_idx = segments; - // Reverse the merge order. - inner_idx = p_segment_indices[segments + segments / 2 - sorted_idx]; - } - - // Find the mergeable faces. - Vector merge_faces_idx; - Vector merge_faces; - Vector merge_faces_inner_vertex_idx; - for (int face_idx = 0; face_idx < faces.size(); ++face_idx) { - for (int face_vertex_idx = 0; face_vertex_idx < 3; ++face_vertex_idx) { - if (faces[face_idx].vertex_idx[face_vertex_idx] == inner_idx) { - merge_faces_idx.push_back(face_idx); - merge_faces.push_back(faces[face_idx]); - merge_faces_inner_vertex_idx.push_back(face_vertex_idx); - } - } - } - - Vector degenerate_points; - - // Create the new faces. - for (int merge_idx = 0; merge_idx < merge_faces.size(); ++merge_idx) { - int outer_edge_idx[2]; - outer_edge_idx[0] = merge_faces[merge_idx].vertex_idx[(merge_faces_inner_vertex_idx[merge_idx] + 1) % 3]; - outer_edge_idx[1] = merge_faces[merge_idx].vertex_idx[(merge_faces_inner_vertex_idx[merge_idx] + 2) % 3]; - - // Skip flattened faces. 
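The degenerate-triangle filter just below leans on are_segments_parallel (defined in an earlier hunk): two edges sharing a vertex span a near-zero area exactly when one is nearly parallel to the other within the snap tolerance. The test in isolation, with an illustrative 2D vector type:

struct V2 { double x, y; };

static double dot2(const V2 &a, const V2 &b) { return a.x * b.x + a.y * b.y; }

// Mirrors are_segments_parallel: compares the squared perpendicular spread of
// the shorter segment off the longer one's direction against the snap threshold.
bool nearly_parallel(const V2 s1[2], const V2 s2[2], double snap2) {
    V2 d1 = { s1[1].x - s1[0].x, s1[1].y - s1[0].y };
    V2 d2 = { s2[1].x - s2[0].x, s2[1].y - s2[0].y };
    double len1 = dot2(d1, d1), len2 = dot2(d2, d2), proj = dot2(d1, d2);
    if (len1 < snap2 || len2 < snap2) return true; // A degenerate segment counts as parallel.
    double sep2 = (len1 > len2) ? len2 - proj * proj / len1
                                : len1 - proj * proj / len2;
    return sep2 < snap2;
}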
- if (outer_edge_idx[0] == p_segment_indices[closest_idx] || - outer_edge_idx[1] == p_segment_indices[closest_idx]) { - continue; - } - - //Don't create degenerate triangles. - Vector2 edge1[2] = { - vertices[outer_edge_idx[0]].point, - vertices[p_segment_indices[closest_idx]].point - }; - Vector2 edge2[2] = { - vertices[outer_edge_idx[1]].point, - vertices[p_segment_indices[closest_idx]].point - }; - if (are_segments_parallel(edge1, edge2, vertex_snap2)) { - if (!degenerate_points.find(outer_edge_idx[0])) { - degenerate_points.push_back(outer_edge_idx[0]); - } - if (!degenerate_points.find(outer_edge_idx[1])) { - degenerate_points.push_back(outer_edge_idx[1]); - } - continue; - } - - // Create new faces. - Face2D new_face; - new_face.vertex_idx[0] = p_segment_indices[closest_idx]; - new_face.vertex_idx[1] = outer_edge_idx[0]; - new_face.vertex_idx[2] = outer_edge_idx[1]; - faces.push_back(new_face); - } - - // Delete the old faces in reverse index order. - merge_faces_idx.sort(); - merge_faces_idx.reverse(); - for (int i = 0; i < merge_faces_idx.size(); ++i) { - faces.remove_at(merge_faces_idx[i]); - } - - if (degenerate_points.size() == 0) { - continue; - } - - // Split faces using degenerate points. - for (int face_idx = 0; face_idx < faces.size(); ++face_idx) { - Face2D face = faces[face_idx]; - Vertex2D face_vertices[3] = { - vertices[face.vertex_idx[0]], - vertices[face.vertex_idx[1]], - vertices[face.vertex_idx[2]] - }; - Vector2 face_points[3] = { - face_vertices[0].point, - face_vertices[1].point, - face_vertices[2].point - }; - - for (int point_idx = 0; point_idx < degenerate_points.size(); ++point_idx) { - int degenerate_idx = degenerate_points[point_idx]; - Vector2 point_2D = vertices[degenerate_idx].point; - - // Check if point is existing face vertex. - bool existing = false; - for (int i = 0; i < 3; ++i) { - if (face_vertices[i].point.distance_squared_to(point_2D) < vertex_snap2) { - existing = true; - break; - } - } - if (existing) { - continue; - } - - // Check if point is on each edge. - for (int face_edge_idx = 0; face_edge_idx < 3; ++face_edge_idx) { - Vector2 edge_points[2] = { - face_points[face_edge_idx], - face_points[(face_edge_idx + 1) % 3] - }; - Vector2 closest_point = Geometry2D::get_closest_point_to_segment(point_2D, edge_points); - - if (point_2D.distance_squared_to(closest_point) < vertex_snap2) { - int opposite_vertex_idx = face.vertex_idx[(face_edge_idx + 2) % 3]; - - // If new vertex snaps to degenerate vertex, just delete this face. - if (degenerate_idx == opposite_vertex_idx) { - faces.remove_at(face_idx); - // Update index. - --face_idx; - break; - } - - // Create two new faces around the new edge and remove this face. - // The new edge is the last edge. - Face2D left_face; - left_face.vertex_idx[0] = degenerate_idx; - left_face.vertex_idx[1] = face.vertex_idx[(face_edge_idx + 1) % 3]; - left_face.vertex_idx[2] = opposite_vertex_idx; - Face2D right_face; - right_face.vertex_idx[0] = opposite_vertex_idx; - right_face.vertex_idx[1] = face.vertex_idx[face_edge_idx]; - right_face.vertex_idx[2] = degenerate_idx; - faces.remove_at(face_idx); - faces.insert(face_idx, right_face); - faces.insert(face_idx, left_face); - - // Don't check against the new faces. - ++face_idx; - - // No need to check other edges. - break; - } - } - } - } - } -} - -void CSGBrushOperation::Build2DFaces::_find_edge_intersections(const Vector2 p_segment_points[2], Vector &r_segment_indices) { - LocalVector> processed_edges; - - // For each face. 
- for (int face_idx = 0; face_idx < faces.size(); ++face_idx) { - Face2D face = faces[face_idx]; - Vertex2D face_vertices[3] = { - vertices[face.vertex_idx[0]], - vertices[face.vertex_idx[1]], - vertices[face.vertex_idx[2]] - }; - - // Check each edge. - for (int face_edge_idx = 0; face_edge_idx < 3; ++face_edge_idx) { - Vector edge_points_and_uvs = { - face_vertices[face_edge_idx].point, - face_vertices[(face_edge_idx + 1) % 3].point, - face_vertices[face_edge_idx].uv, - face_vertices[(face_edge_idx + 1) % 3].uv - }; - - Vector2 edge_points[2] = { - edge_points_and_uvs[0], - edge_points_and_uvs[1], - }; - Vector2 edge_uvs[2] = { - edge_points_and_uvs[2], - edge_points_and_uvs[3], - }; - - // Check if edge has already been processed. - if (processed_edges.has(edge_points_and_uvs)) { - continue; - } - - processed_edges.push_back(edge_points_and_uvs); - - // First check if the ends of the segment are on the edge. - Vector2 intersection_point; - - bool on_edge = false; - for (int edge_point_idx = 0; edge_point_idx < 2; ++edge_point_idx) { - intersection_point = Geometry2D::get_closest_point_to_segment(p_segment_points[edge_point_idx], edge_points); - if (p_segment_points[edge_point_idx].distance_squared_to(intersection_point) < vertex_snap2) { - on_edge = true; - break; - } - } - - // Else check if the segment intersects the edge. - if (on_edge || Geometry2D::segment_intersects_segment(p_segment_points[0], p_segment_points[1], edge_points[0], edge_points[1], &intersection_point)) { - // Check if intersection point is an edge point. - if ((edge_points[0].distance_squared_to(intersection_point) < vertex_snap2) || - (edge_points[1].distance_squared_to(intersection_point) < vertex_snap2)) { - continue; - } - - // Check if edge exists, by checking if the intersecting segment is parallel to the edge. - if (are_segments_parallel(p_segment_points, edge_points, vertex_snap2)) { - continue; - } - - // Add the intersection point as a new vertex. - Vertex2D new_vertex; - new_vertex.point = intersection_point; - new_vertex.uv = interpolate_segment_uv(edge_points, edge_uvs, intersection_point); - int new_vertex_idx = _add_vertex(new_vertex); - int opposite_vertex_idx = face.vertex_idx[(face_edge_idx + 2) % 3]; - _add_vertex_idx_sorted(r_segment_indices, new_vertex_idx); - - // If new vertex snaps to opposite vertex, just delete this face. - if (new_vertex_idx == opposite_vertex_idx) { - faces.remove_at(face_idx); - // Update index. - --face_idx; - break; - } - - // If opposite point is on the segment, add its index to segment indices too. - Vector2 closest_point = Geometry2D::get_closest_point_to_segment(vertices[opposite_vertex_idx].point, p_segment_points); - if (vertices[opposite_vertex_idx].point.distance_squared_to(closest_point) < vertex_snap2) { - _add_vertex_idx_sorted(r_segment_indices, opposite_vertex_idx); - } - - // Create two new faces around the new edge and remove this face. - // The new edge is the last edge. - Face2D left_face; - left_face.vertex_idx[0] = new_vertex_idx; - left_face.vertex_idx[1] = face.vertex_idx[(face_edge_idx + 1) % 3]; - left_face.vertex_idx[2] = opposite_vertex_idx; - Face2D right_face; - right_face.vertex_idx[0] = opposite_vertex_idx; - right_face.vertex_idx[1] = face.vertex_idx[face_edge_idx]; - right_face.vertex_idx[2] = new_vertex_idx; - faces.remove_at(face_idx); - faces.insert(face_idx, right_face); - faces.insert(face_idx, left_face); - - // Check against the new faces. 
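The splitting loop above defers to Geometry2D::segment_intersects_segment; the primitive underneath is a pair of signed-area sign checks plus one interpolation. Standalone, for proper crossings only; collinear and endpoint-touching cases, which the snap logic above absorbs, are not handled:

struct Pt2 { double x, y; };

// Twice the signed area of triangle (o, a, b); its sign gives b's side of o->a.
static double cross2(const Pt2 &o, const Pt2 &a, const Pt2 &b) {
    return (a.x - o.x) * (b.y - o.y) - (a.y - o.y) * (b.x - o.x);
}

// True (with the crossing point in r_out) when segment a-b properly crosses c-d.
bool segments_cross(const Pt2 &a, const Pt2 &b, const Pt2 &c, const Pt2 &d, Pt2 &r_out) {
    double d1 = cross2(c, d, a); // Side of a relative to c-d.
    double d2 = cross2(c, d, b); // Side of b relative to c-d.
    double d3 = cross2(a, b, c); // Side of c relative to a-b.
    double d4 = cross2(a, b, d); // Side of d relative to a-b.
    if ((d1 > 0) == (d2 > 0) || (d3 > 0) == (d4 > 0)) {
        return false; // One segment's endpoints lie on the same side of the other.
    }
    double t = d1 / (d1 - d2); // Fraction along a-b where the lines meet.
    r_out = { a.x + t * (b.x - a.x), a.y + t * (b.y - a.y) };
    return true;
}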
- --face_idx; - break; - } - } - } -} - -int CSGBrushOperation::Build2DFaces::_insert_point(const Vector2 &p_point) { - int new_vertex_idx = -1; - - for (int face_idx = 0; face_idx < faces.size(); ++face_idx) { - Face2D face = faces[face_idx]; - Vertex2D face_vertices[3] = { - vertices[face.vertex_idx[0]], - vertices[face.vertex_idx[1]], - vertices[face.vertex_idx[2]] - }; - Vector2 points[3] = { - face_vertices[0].point, - face_vertices[1].point, - face_vertices[2].point - }; - Vector2 uvs[3] = { - face_vertices[0].uv, - face_vertices[1].uv, - face_vertices[2].uv - }; - - // Skip degenerate triangles. - if (is_triangle_degenerate(points, vertex_snap2)) { - continue; - } - - // Check if point is existing face vertex. - for (int i = 0; i < 3; ++i) { - if (face_vertices[i].point.distance_squared_to(p_point) < vertex_snap2) { - return face.vertex_idx[i]; - } - } - - // Check if point is on each edge. - bool on_edge = false; - for (int face_edge_idx = 0; face_edge_idx < 3; ++face_edge_idx) { - Vector2 edge_points[2] = { - points[face_edge_idx], - points[(face_edge_idx + 1) % 3] - }; - Vector2 edge_uvs[2] = { - uvs[face_edge_idx], - uvs[(face_edge_idx + 1) % 3] - }; - - Vector2 closest_point = Geometry2D::get_closest_point_to_segment(p_point, edge_points); - if (p_point.distance_squared_to(closest_point) < vertex_snap2) { - on_edge = true; - - // Add the point as a new vertex. - Vertex2D new_vertex; - new_vertex.point = p_point; - new_vertex.uv = interpolate_segment_uv(edge_points, edge_uvs, p_point); - new_vertex_idx = _add_vertex(new_vertex); - int opposite_vertex_idx = face.vertex_idx[(face_edge_idx + 2) % 3]; - - // If new vertex snaps to opposite vertex, just delete this face. - if (new_vertex_idx == opposite_vertex_idx) { - faces.remove_at(face_idx); - // Update index. - --face_idx; - break; - } - - // Don't create degenerate triangles. - Vector2 split_edge1[2] = { vertices[new_vertex_idx].point, edge_points[0] }; - Vector2 split_edge2[2] = { vertices[new_vertex_idx].point, edge_points[1] }; - Vector2 new_edge[2] = { vertices[new_vertex_idx].point, vertices[opposite_vertex_idx].point }; - if (are_segments_parallel(split_edge1, new_edge, vertex_snap2) && - are_segments_parallel(split_edge2, new_edge, vertex_snap2)) { - break; - } - - // Create two new faces around the new edge and remove this face. - // The new edge is the last edge. - Face2D left_face; - left_face.vertex_idx[0] = new_vertex_idx; - left_face.vertex_idx[1] = face.vertex_idx[(face_edge_idx + 1) % 3]; - left_face.vertex_idx[2] = opposite_vertex_idx; - Face2D right_face; - right_face.vertex_idx[0] = opposite_vertex_idx; - right_face.vertex_idx[1] = face.vertex_idx[face_edge_idx]; - right_face.vertex_idx[2] = new_vertex_idx; - faces.remove_at(face_idx); - faces.insert(face_idx, right_face); - faces.insert(face_idx, left_face); - - // Don't check against the new faces. - ++face_idx; - - // No need to check other edges. - break; - } - } - - // If not on an edge, check if the point is inside the face. - if (!on_edge && Geometry2D::is_point_in_triangle(p_point, face_vertices[0].point, face_vertices[1].point, face_vertices[2].point)) { - // Add the point as a new vertex. - Vertex2D new_vertex; - new_vertex.point = p_point; - new_vertex.uv = interpolate_triangle_uv(points, uvs, p_point); - new_vertex_idx = _add_vertex(new_vertex); - - // Create three new faces around this point and remove this face. - // The new vertex is the last vertex. - for (int i = 0; i < 3; ++i) { - // Don't create degenerate triangles. 
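The Geometry2D::is_point_in_triangle fallback used above boils down to checking that the point sits on the same side of all three edges; the standard same-side formulation, reusing Pt2 and cross2 from the previous sketch:

// Same-side test: p is inside (or on the boundary of) triangle a-b-c when the
// three signed areas do not disagree in sign, for either winding order.
bool point_in_triangle(const Pt2 &p, const Pt2 &a, const Pt2 &b, const Pt2 &c) {
    double d1 = cross2(a, b, p);
    double d2 = cross2(b, c, p);
    double d3 = cross2(c, a, p);
    bool has_neg = d1 < 0 || d2 < 0 || d3 < 0;
    bool has_pos = d1 > 0 || d2 > 0 || d3 > 0;
    return !(has_neg && has_pos);
}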
- Vector2 new_points[3] = { points[i], points[(i + 1) % 3], vertices[new_vertex_idx].point }; - if (is_triangle_degenerate(new_points, vertex_snap2)) { - continue; - } - - Face2D new_face; - new_face.vertex_idx[0] = face.vertex_idx[i]; - new_face.vertex_idx[1] = face.vertex_idx[(i + 1) % 3]; - new_face.vertex_idx[2] = new_vertex_idx; - faces.push_back(new_face); - } - faces.remove_at(face_idx); - - // No need to check other faces. - break; - } - } - - return new_vertex_idx; -} - -void CSGBrushOperation::Build2DFaces::insert(const CSGBrush &p_brush, int p_face_idx) { - // Find edge points that cross the plane and face points that are in the plane. - // Map those points to 2D. - // Create new faces from those points. - - Vector2 points_2D[3]; - int points_count = 0; - - for (int i = 0; i < 3; i++) { - Vector3 point_3D = p_brush.faces[p_face_idx].vertices[i]; - - if (plane.has_point(point_3D)) { - // Point is in the plane, add it. - Vector3 point_2D = plane.project(point_3D); - point_2D = to_2D.xform(point_2D); - points_2D[points_count++] = Vector2(point_2D.x, point_2D.y); - - } else { - Vector3 next_point_3D = p_brush.faces[p_face_idx].vertices[(i + 1) % 3]; - - if (plane.has_point(next_point_3D)) { - continue; // Next point is in plane, it will be added separately. - } - if (plane.is_point_over(point_3D) == plane.is_point_over(next_point_3D)) { - continue; // Both points on the same side of the plane, ignore. - } - - // Edge crosses the plane, find and add the intersection point. - Vector3 point_2D; - if (plane.intersects_segment(point_3D, next_point_3D, &point_2D)) { - point_2D = to_2D.xform(point_2D); - points_2D[points_count++] = Vector2(point_2D.x, point_2D.y); - } - } - } - - Vector segment_indices; - Vector2 segment[2]; - int inserted_index[3] = { -1, -1, -1 }; - - // Insert points. - for (int i = 0; i < points_count; ++i) { - inserted_index[i] = _insert_point(points_2D[i]); - } - - if (points_count == 2) { - // Insert a single segment. - segment[0] = points_2D[0]; - segment[1] = points_2D[1]; - _find_edge_intersections(segment, segment_indices); - for (int i = 0; i < 2; ++i) { - _add_vertex_idx_sorted(segment_indices, inserted_index[i]); - } - _merge_faces(segment_indices); - } - - if (points_count == 3) { - // Insert three segments. - for (int edge_idx = 0; edge_idx < 3; ++edge_idx) { - segment[0] = points_2D[edge_idx]; - segment[1] = points_2D[(edge_idx + 1) % 3]; - _find_edge_intersections(segment, segment_indices); - for (int i = 0; i < 2; ++i) { - _add_vertex_idx_sorted(segment_indices, inserted_index[(edge_idx + i) % 3]); - } - _merge_faces(segment_indices); - segment_indices.clear(); - } - } -} - -void CSGBrushOperation::Build2DFaces::addFacesToMesh(MeshMerge &r_mesh_merge, bool p_smooth, bool p_invert, const Ref &p_material, bool p_from_b) { - for (int face_idx = 0; face_idx < faces.size(); ++face_idx) { - Face2D face = faces[face_idx]; - Vertex2D fv[3] = { - vertices[face.vertex_idx[0]], - vertices[face.vertex_idx[1]], - vertices[face.vertex_idx[2]] - }; - - // Convert 2D vertex points to 3D. - Vector3 points_3D[3]; - Vector2 uvs[3]; - for (int i = 0; i < 3; ++i) { - Vector3 point_2D(fv[i].point.x, fv[i].point.y, 0); - points_3D[i] = to_3D.xform(point_2D); - uvs[i] = fv[i].uv; - } - - r_mesh_merge.add_face(points_3D, uvs, p_smooth, p_invert, p_material, p_from_b); - } -} - -CSGBrushOperation::Build2DFaces::Build2DFaces(const CSGBrush &p_brush, int p_face_idx, float p_vertex_snap2) : - vertex_snap2(p_vertex_snap2 * p_vertex_snap2) { - // Convert 3D vertex points to 2D. 
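The constructor body (next hunk) builds that 2D mapping by assembling an orthonormal triangle-local basis (the plane normal on one column, an edge direction on another) and taking its affine inverse. For an orthonormal frame the inverse is just origin-relative dot products, which the following sketch makes explicit, reusing Vec3 from the earlier sketch:

#include <cmath>

struct PlaneFrame { Vec3 origin, x_axis, y_axis, normal; };

PlaneFrame make_plane_frame(const Vec3 &p0, const Vec3 &p1, const Vec3 &p2) {
    auto normalized = [](const Vec3 &v) {
        double len = std::sqrt(v.dot(v));
        return Vec3{ v.x / len, v.y / len, v.z / len };
    };
    Vec3 n = normalized((p1 - p0).cross(p2 - p0)); // Plane normal.
    Vec3 x = normalized(p1 - p2);                  // Any in-plane direction.
    Vec3 y = normalized(x.cross(n));               // Completes the frame.
    return { p0, x, y, n };
}

// Equivalent of the to_2D transform below for an orthonormal frame.
void project_to_plane(const PlaneFrame &f, const Vec3 &p, double &r_u, double &r_v) {
    Vec3 d = p - f.origin;
    r_u = d.dot(f.x_axis);
    r_v = d.dot(f.y_axis);
}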
- Vector3 points_3D[3] = { - p_brush.faces[p_face_idx].vertices[0], - p_brush.faces[p_face_idx].vertices[1], - p_brush.faces[p_face_idx].vertices[2], - }; - - plane = Plane(points_3D[0], points_3D[1], points_3D[2]); - to_3D.origin = points_3D[0]; - to_3D.basis.set_column(2, plane.normal); - to_3D.basis.set_column(0, (points_3D[1] - points_3D[2]).normalized()); - to_3D.basis.set_column(1, to_3D.basis.get_column(0).cross(to_3D.basis.get_column(2)).normalized()); - to_2D = to_3D.affine_inverse(); - - Face2D face; - for (int i = 0; i < 3; i++) { - Vertex2D vertex; - Vector3 point_2D = to_2D.xform(points_3D[i]); - vertex.point.x = point_2D.x; - vertex.point.y = point_2D.y; - vertex.uv = p_brush.faces[p_face_idx].uvs[i]; - vertices.push_back(vertex); - face.vertex_idx[i] = i; - } - faces.push_back(face); -} - -void CSGBrushOperation::update_faces(const CSGBrush &p_brush_a, const int p_face_idx_a, const CSGBrush &p_brush_b, const int p_face_idx_b, Build2DFaceCollection &p_collection, float p_vertex_snap) { - Vector3 vertices_a[3] = { - p_brush_a.faces[p_face_idx_a].vertices[0], - p_brush_a.faces[p_face_idx_a].vertices[1], - p_brush_a.faces[p_face_idx_a].vertices[2], - }; - - Vector3 vertices_b[3] = { - p_brush_b.faces[p_face_idx_b].vertices[0], - p_brush_b.faces[p_face_idx_b].vertices[1], - p_brush_b.faces[p_face_idx_b].vertices[2], - }; - - // Don't use degenerate faces. - bool has_degenerate = false; - if (is_snapable(vertices_a[0], vertices_a[1], p_vertex_snap) || - is_snapable(vertices_a[0], vertices_a[2], p_vertex_snap) || - is_snapable(vertices_a[1], vertices_a[2], p_vertex_snap)) { - p_collection.build2DFacesA[p_face_idx_a] = Build2DFaces(); - has_degenerate = true; - } - - if (is_snapable(vertices_b[0], vertices_b[1], p_vertex_snap) || - is_snapable(vertices_b[0], vertices_b[2], p_vertex_snap) || - is_snapable(vertices_b[1], vertices_b[2], p_vertex_snap)) { - p_collection.build2DFacesB[p_face_idx_b] = Build2DFaces(); - has_degenerate = true; - } - if (has_degenerate) { - return; - } - - // Ensure B has points either side of or in the plane of A. - int over_count = 0, under_count = 0; - Plane plane_a(vertices_a[0], vertices_a[1], vertices_a[2]); - ERR_FAIL_COND_MSG(plane_a.normal == Vector3(), "Couldn't form plane from Brush A face."); - - for (int i = 0; i < 3; i++) { - if (plane_a.has_point(vertices_b[i])) { - // In plane. - } else if (plane_a.is_point_over(vertices_b[i])) { - over_count++; - } else { - under_count++; - } - } - // If all points under or over the plane, there is no intersection. - if (over_count == 3 || under_count == 3) { - return; - } - - // Ensure A has points either side of or in the plane of B. - over_count = 0; - under_count = 0; - Plane plane_b(vertices_b[0], vertices_b[1], vertices_b[2]); - ERR_FAIL_COND_MSG(plane_b.normal == Vector3(), "Couldn't form plane from Brush B face."); - - for (int i = 0; i < 3; i++) { - if (plane_b.has_point(vertices_a[i])) { - // In plane. - } else if (plane_b.is_point_over(vertices_a[i])) { - over_count++; - } else { - under_count++; - } - } - // If all points under or over the plane, there is no intersection. - if (over_count == 3 || under_count == 3) { - return; - } - - // Check for intersection using the SAT theorem. - { - // Edge pair cross product combinations. 
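The nine-axis loop that follows (next hunk) is a separating-axis test: for each cross product of an edge from A with an edge from B, project both triangles onto the axis and look for a gap. The per-axis primitive, reusing Vec3 from the earlier sketch:

#include <algorithm>

// One SAT probe: project both triangles onto `axis`; a gap between the two
// projected intervals proves the triangles do not intersect.
bool overlap_on_axis(const Vec3 tri_a[3], const Vec3 tri_b[3], const Vec3 &axis) {
    double min_a = 1e300, max_a = -1e300, min_b = 1e300, max_b = -1e300;
    for (int k = 0; k < 3; k++) {
        double da = axis.dot(tri_a[k]);
        double db = axis.dot(tri_b[k]);
        min_a = std::min(min_a, da);
        max_a = std::max(max_a, da);
        min_b = std::min(min_b, db);
        max_b = std::max(max_b, db);
    }
    return !(max_a < min_b || max_b < min_a);
}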
- for (int i = 0; i < 3; i++) { - Vector3 axis_a = (vertices_a[i] - vertices_a[(i + 1) % 3]).normalized(); - - for (int j = 0; j < 3; j++) { - Vector3 axis_b = (vertices_b[j] - vertices_b[(j + 1) % 3]).normalized(); - - Vector3 sep_axis = axis_a.cross(axis_b); - if (sep_axis == Vector3()) { - continue; //colineal - } - sep_axis.normalize(); - - real_t min_a = 1e20, max_a = -1e20; - real_t min_b = 1e20, max_b = -1e20; - - for (int k = 0; k < 3; k++) { - real_t d = sep_axis.dot(vertices_a[k]); - min_a = MIN(min_a, d); - max_a = MAX(max_a, d); - d = sep_axis.dot(vertices_b[k]); - min_b = MIN(min_b, d); - max_b = MAX(max_b, d); - } - - min_b -= (max_a - min_a) * 0.5; - max_b += (max_a - min_a) * 0.5; - - real_t dmin = min_b - (min_a + max_a) * 0.5; - real_t dmax = max_b - (min_a + max_a) * 0.5; - - if (dmin > CMP_EPSILON || dmax < -CMP_EPSILON) { - return; // Does not contain zero, so they don't overlap. - } - } - } - } - - // If we're still here, the faces probably intersect, so add new faces. - if (!p_collection.build2DFacesA.has(p_face_idx_a)) { - p_collection.build2DFacesA[p_face_idx_a] = Build2DFaces(p_brush_a, p_face_idx_a, p_vertex_snap); - } - p_collection.build2DFacesA[p_face_idx_a].insert(p_brush_b, p_face_idx_b); - - if (!p_collection.build2DFacesB.has(p_face_idx_b)) { - p_collection.build2DFacesB[p_face_idx_b] = Build2DFaces(p_brush_b, p_face_idx_b, p_vertex_snap); - } - p_collection.build2DFacesB[p_face_idx_b].insert(p_brush_a, p_face_idx_a); -} diff --git a/modules/csg/csg.h b/modules/csg/csg.h deleted file mode 100644 index 2a0831e1ce18..000000000000 --- a/modules/csg/csg.h +++ /dev/null @@ -1,204 +0,0 @@ -/**************************************************************************/ -/* csg.h */ -/**************************************************************************/ -/* This file is part of: */ -/* GODOT ENGINE */ -/* https://godotengine.org */ -/**************************************************************************/ -/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */ -/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */ -/* */ -/* Permission is hereby granted, free of charge, to any person obtaining */ -/* a copy of this software and associated documentation files (the */ -/* "Software"), to deal in the Software without restriction, including */ -/* without limitation the rights to use, copy, modify, merge, publish, */ -/* distribute, sublicense, and/or sell copies of the Software, and to */ -/* permit persons to whom the Software is furnished to do so, subject to */ -/* the following conditions: */ -/* */ -/* The above copyright notice and this permission notice shall be */ -/* included in all copies or substantial portions of the Software. */ -/* */ -/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ -/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ -/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */ -/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ -/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ -/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ -/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -/**************************************************************************/ - -#ifndef CSG_H -#define CSG_H - -#include "core/math/aabb.h" -#include "core/math/plane.h" -#include "core/math/transform_3d.h" -#include "core/math/vector2.h" -#include "core/math/vector3.h" -#include "core/object/ref_counted.h" -#include "core/templates/list.h" -#include "core/templates/oa_hash_map.h" -#include "core/templates/vector.h" -#include "scene/resources/material.h" - -struct CSGBrush { - struct Face { - Vector3 vertices[3]; - Vector2 uvs[3]; - AABB aabb; - bool smooth = false; - bool invert = false; - int material = 0; - }; - - Vector faces; - Vector> materials; - - inline void _regen_face_aabbs(); - - // Create a brush from faces. - void build_from_faces(const Vector &p_vertices, const Vector &p_uvs, const Vector &p_smooth, const Vector> &p_materials, const Vector &p_invert_faces); - void copy_from(const CSGBrush &p_brush, const Transform3D &p_xform); -}; - -struct CSGBrushOperation { - enum Operation { - OPERATION_UNION, - OPERATION_INTERSECTION, - OPERATION_SUBTRACTION, - }; - - void merge_brushes(Operation p_operation, const CSGBrush &p_brush_a, const CSGBrush &p_brush_b, CSGBrush &r_merged_brush, float p_vertex_snap); - - struct MeshMerge { - struct Face { - bool from_b = false; - bool inside = false; - int points[3] = {}; - Vector2 uvs[3]; - bool smooth = false; - bool invert = false; - int material_idx = 0; - }; - - struct FaceBVH { - int face = 0; - int left = 0; - int right = 0; - int next = 0; - Vector3 center; - AABB aabb; - }; - - struct FaceBVHCmpX { - _FORCE_INLINE_ bool operator()(const FaceBVH *p_left, const FaceBVH *p_right) const { - return p_left->center.x < p_right->center.x; - } - }; - - struct FaceBVHCmpY { - _FORCE_INLINE_ bool operator()(const FaceBVH *p_left, const FaceBVH *p_right) const { - return p_left->center.y < p_right->center.y; - } - }; - struct FaceBVHCmpZ { - _FORCE_INLINE_ bool operator()(const FaceBVH *p_left, const FaceBVH *p_right) const { - return p_left->center.z < p_right->center.z; - } - }; - - struct VertexKey { - int32_t x, y, z; - _FORCE_INLINE_ bool operator<(const VertexKey &p_key) const { - if (x == p_key.x) { - if (y == p_key.y) { - return z < p_key.z; - } else { - return y < p_key.y; - } - } else { - return x < p_key.x; - } - } - - _FORCE_INLINE_ bool operator==(const VertexKey &p_key) const { - return (x == p_key.x && y == p_key.y && z == p_key.z); - } - }; - - struct VertexKeyHash { - static _FORCE_INLINE_ uint32_t hash(const VertexKey &p_vk) { - uint32_t h = hash_murmur3_one_32(p_vk.x); - h = hash_murmur3_one_32(p_vk.y, h); - h = hash_murmur3_one_32(p_vk.z, h); - return h; - } - }; - struct Intersection { - bool found = false; - real_t conormal = FLT_MAX; - real_t distance_squared = FLT_MAX; - real_t origin_angle = FLT_MAX; - }; - - struct IntersectionDistance { - bool is_conormal; - real_t distance_squared; - }; - - Vector points; - Vector faces; - HashMap, int> materials; - HashMap vertex_map; - OAHashMap snap_cache; - float vertex_snap = 0.0; - - inline void _add_distance(List &r_intersectionsA, List &r_intersectionsB, bool p_from_B, real_t p_distance, bool p_is_conormal) const; - inline bool _bvh_inside(FaceBVH *r_facebvhptr, int p_max_depth, int p_bvh_first, int p_face_idx) const; - inline int _create_bvh(FaceBVH *r_facebvhptr, FaceBVH **r_facebvhptrptr, int p_from, int p_size, int p_depth, int &r_max_depth, int &r_max_alloc); - - void add_face(const Vector3 p_points[3], const Vector2 p_uvs[3], bool p_smooth, bool p_invert, const Ref 
<Material> &p_material, bool p_from_b);
-		void mark_inside_faces();
-	};
-
-	struct Build2DFaces {
-		struct Vertex2D {
-			Vector2 point;
-			Vector2 uv;
-		};
-
-		struct Face2D {
-			int vertex_idx[3] = {};
-		};
-
-		Vector<Vertex2D> vertices;
-		Vector<Face2D> faces;
-		Plane plane;
-		Transform3D to_2D;
-		Transform3D to_3D;
-		float vertex_snap2 = 0.0;
-
-		inline int _get_point_idx(const Vector2 &p_point);
-		inline int _add_vertex(const Vertex2D &p_vertex);
-		inline void _add_vertex_idx_sorted(Vector<int> &r_vertex_indices, int p_new_vertex_index);
-		inline void _merge_faces(const Vector<int> &p_segment_indices);
-		inline void _find_edge_intersections(const Vector2 p_segment_points[2], Vector<int> &r_segment_indices);
-		inline int _insert_point(const Vector2 &p_point);
-
-		void insert(const CSGBrush &p_brush, int p_brush_face);
-		void addFacesToMesh(MeshMerge &r_mesh_merge, bool p_smooth, bool p_invert, const Ref<Material> &p_material, bool p_from_b);
-
-		Build2DFaces() {}
-		Build2DFaces(const CSGBrush &p_brush, int p_brush_face, float p_vertex_snap2);
-	};
-
-	struct Build2DFaceCollection {
-		HashMap<int, Build2DFaces> build2DFacesA;
-		HashMap<int, Build2DFaces> build2DFacesB;
-	};
-
-	void update_faces(const CSGBrush &p_brush_a, const int p_face_idx_a, const CSGBrush &p_brush_b, const int p_face_idx_b, Build2DFaceCollection &p_collection, float p_vertex_snap);
-};
-
-#endif // CSG_H
diff --git a/modules/csg/csg_shape.cpp b/modules/csg/csg_shape.cpp
index 7c93fbf081b2..2ad7aa290fed 100644
--- a/modules/csg/csg_shape.cpp
+++ b/modules/csg/csg_shape.cpp
@@ -32,6 +32,306 @@
 
 #include "core/math/geometry_2d.h"
 
+#include "thirdparty/manifold/src/manifold/include/manifold.h"
+#include "thirdparty/manifold/src/utilities/include/public.h"
+
+// Static helper functions.
+
+struct CSGBrush {
+	struct Face {
+		Vector3 vertices[3];
+		Vector2 uvs[3];
+		AABB aabb;
+		bool smooth = false;
+		bool invert = false;
+		int material = 0;
+	};
+
+	Vector<Face> faces;
+	Vector<Ref<Material>> materials;
+
+	manifold::Manifold manifold;
+	enum {
+		MANIFOLD_PROPERTY_POS_X = 0,
+		MANIFOLD_PROPERTY_POS_Y,
+		MANIFOLD_PROPERTY_POS_Z,
+		MANIFOLD_PROPERTY_UV_X,
+		MANIFOLD_PROPERTY_UV_Y,
+		MANIFOLD_PROPERTY_MATERIAL,
+		MANIFOLD_PROPERTY_SMOOTH,
+		MANIFOLD_PROPERTY_INVERT,
+		MANIFOLD_MAX
+	};
+	static constexpr int MANIFOLD_TRIANGLE = 3;
+
+	inline void _regen_face_aabbs() {
+		for (int i = 0; i < faces.size(); i++) {
+			faces.write[i].aabb = AABB();
+			faces.write[i].aabb.position = faces[i].vertices[0];
+			faces.write[i].aabb.expand_to(faces[i].vertices[1]);
+			faces.write[i].aabb.expand_to(faces[i].vertices[2]);
+		}
+	}
+
+	void pack_manifold(const float p_snap, HashMap<int32_t, Ref<Material>> &r_materials) {
+		if (faces.is_empty()) {
+			return;
+		}
+		faces.sort_custom<MaterialIndexComparator>();
+
+		HashSet<int32_t> unique_materials;
+		for (CSGBrush::Face &face : faces) {
+			unique_materials.insert(face.material);
+		}
+
+		LocalVector<CSGBrush> split_brushes;
+		CowData<Face>::Size unique_face_i = 0;
+		for (int32_t material_id : unique_materials) {
+			uint32_t reserved_id = manifold.ReserveIDs(1);
+			CSGBrush current_brush;
+			Ref<Material> material;
+			if (material_id >= 0 && material_id < materials.size()) {
+				material = materials[material_id];
+			}
+			CSGBrush::Face starting_face = faces[unique_face_i];
+			while (unique_face_i < faces.size()) {
+				CSGBrush::Face face = faces[unique_face_i];
+				if (starting_face.material != face.material) {
+					break;
+				}
+				face.material = reserved_id;
+				current_brush.faces.push_back(face);
+				unique_face_i++;
+			}
+			manifold::MeshGL mesh;
+			mesh.runOriginalID.push_back(reserved_id);
+			mesh.triVerts.resize(current_brush.faces.size() * CSGBrush::MANIFOLD_TRIANGLE, 0);
+			mesh.vertProperties.resize(current_brush.faces.size() * CSGBrush::MANIFOLD_TRIANGLE * CSGBrush::MANIFOLD_MAX, std::numeric_limits<float>::quiet_NaN());
+			mesh.numProp = CSGBrush::MANIFOLD_MAX;
+			constexpr size_t order[CSGBrush::MANIFOLD_TRIANGLE] = { 0, 2, 1 };
+			for (CowData<Face>::Size face_i = 0; face_i < current_brush.faces.size(); face_i++) {
+				const CSGBrush::Face &face = current_brush.faces[face_i];
+				for (size_t vertex_i = 0; vertex_i < CSGBrush::MANIFOLD_TRIANGLE; vertex_i++) {
+					int32_t index = face_i * CSGBrush::MANIFOLD_TRIANGLE + vertex_i;
+					mesh.triVerts[face_i * CSGBrush::MANIFOLD_TRIANGLE + order[vertex_i]] = index;
+					Vector3 pos = face.vertices[vertex_i];
+					Vector2 uv = face.uvs[vertex_i];
+					mesh.vertProperties[index * CSGBrush::MANIFOLD_MAX + CSGBrush::MANIFOLD_PROPERTY_POS_X] = pos.x;
+					mesh.vertProperties[index * CSGBrush::MANIFOLD_MAX + CSGBrush::MANIFOLD_PROPERTY_POS_Y] = pos.y;
+					mesh.vertProperties[index * CSGBrush::MANIFOLD_MAX + CSGBrush::MANIFOLD_PROPERTY_POS_Z] = pos.z;
+					mesh.vertProperties[index * CSGBrush::MANIFOLD_MAX + CSGBrush::MANIFOLD_PROPERTY_UV_X] = uv.x;
+					mesh.vertProperties[index * CSGBrush::MANIFOLD_MAX + CSGBrush::MANIFOLD_PROPERTY_UV_Y] = uv.y;
+					mesh.vertProperties[index * CSGBrush::MANIFOLD_MAX + CSGBrush::MANIFOLD_PROPERTY_MATERIAL] = reserved_id;
+					mesh.vertProperties[index * CSGBrush::MANIFOLD_MAX + CSGBrush::MANIFOLD_PROPERTY_SMOOTH] = face.smooth ? 1.0f : 0.0f;
+					mesh.vertProperties[index * CSGBrush::MANIFOLD_MAX + CSGBrush::MANIFOLD_PROPERTY_INVERT] = face.invert ? 1.0f : 0.0f;
+				}
+			}
+			mesh.precision = p_snap;
+			mesh.Merge();
+			r_materials.insert(reserved_id, material);
+			manifold = manifold.Boolean(manifold::Manifold(mesh), manifold::OpType::Add);
+			split_brushes.push_back(current_brush);
+		}
+	}
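// --- Illustrative sketch (editorial aside, not part of the patch) ---------
// pack_manifold() above interleaves all vertex data into MeshGL's flat
// vertProperties buffer: MANIFOLD_MAX floats per vertex, laid out as
// [pos.x, pos.y, pos.z, uv.x, uv.y, material, smooth, invert], so property p
// of vertex i lives at vertProperties[i * MANIFOLD_MAX + p]. The { 0, 2, 1 }
// index order reverses each triangle, apparently to flip the front-face
// winding between Godot's convention and Manifold's. A minimal
// self-contained version of the index math:
//
//	#include <vector>
//	enum Prop { POS_X, POS_Y, POS_Z, UV_X, UV_Y, MATERIAL, SMOOTH, INVERT, PROP_MAX };
//	float read_prop(const std::vector<float> &props, int vertex, Prop p) {
//		return props[vertex * PROP_MAX + p]; // one flat float per property
//	}
// ---------------------------------------------------------------------------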
+	void unpack_manifold(const HashMap<int32_t, Ref<Material>> &p_materials) {
+		Ref<Material> default_material;
+		default_material.instantiate();
+		manifold::MeshGL mesh = manifold.GetMeshGL();
+		LocalVector<Vector3> manifold_positions;
+		manifold_positions.resize(mesh.vertProperties.size());
+		LocalVector<Vector2> manifold_uvs;
+		manifold_uvs.resize(mesh.vertProperties.size());
+		LocalVector<int32_t> manifold_materials;
+		manifold_materials.resize(mesh.vertProperties.size());
+		LocalVector<bool> manifold_smooths;
+		manifold_smooths.resize(mesh.vertProperties.size());
+		LocalVector<bool> manifold_inverts;
+		manifold_inverts.resize(mesh.vertProperties.size());
+		ERR_FAIL_COND_MSG(mesh.vertProperties.size() % mesh.numProp != 0, "Invalid vertex properties size.");
+		size_t position_index = 0;
+		size_t uv_index = 0;
+		size_t material_index = 0;
+		size_t smooth_index = 0;
+		size_t invert_index = 0;
+		for (size_t property_i = 0; property_i < mesh.vertProperties.size(); property_i += CSGBrush::MANIFOLD_MAX) {
+			manifold_positions[position_index++] = Vector3(mesh.vertProperties[property_i + CSGBrush::MANIFOLD_PROPERTY_POS_X], mesh.vertProperties[property_i + CSGBrush::MANIFOLD_PROPERTY_POS_Y], mesh.vertProperties[property_i + CSGBrush::MANIFOLD_PROPERTY_POS_Z]);
+			manifold_uvs[uv_index++] = Vector2(mesh.vertProperties[property_i + CSGBrush::MANIFOLD_PROPERTY_UV_X], mesh.vertProperties[property_i + CSGBrush::MANIFOLD_PROPERTY_UV_Y]);
+			manifold_materials[material_index++] = static_cast<int32_t>(Math::round(mesh.vertProperties[property_i + CSGBrush::MANIFOLD_PROPERTY_MATERIAL]));
+			manifold_smooths[smooth_index++] = mesh.vertProperties[property_i + CSGBrush::MANIFOLD_PROPERTY_SMOOTH] > 0.5f;
+			manifold_inverts[invert_index++] = mesh.vertProperties[property_i + CSGBrush::MANIFOLD_PROPERTY_INVERT] > 0.5f;
+		}
+		faces.resize(mesh.triVerts.size() / CSGBrush::MANIFOLD_TRIANGLE);
+		constexpr int32_t order[CSGBrush::MANIFOLD_TRIANGLE] = { 0, 2, 1 };
+		for (size_t triangle_i = 0; triangle_i < mesh.triVerts.size() / CSGBrush::MANIFOLD_TRIANGLE; triangle_i++) {
+			CSGBrush::Face &face = faces.write[triangle_i];
+			for (int32_t vertex_i = 0; vertex_i < 3; vertex_i++) {
+				int32_t index = mesh.triVerts[triangle_i * CSGBrush::MANIFOLD_TRIANGLE + order[vertex_i]];
+				Vector3 position = manifold_positions[index];
+				Vector2 uv = manifold_uvs[index];
+				face.vertices[vertex_i] = Vector3(position.x, position.y, position.z);
+				face.uvs[vertex_i] = Vector2(uv.x, uv.y);
+				face.smooth = manifold_smooths[index];
+				face.invert = manifold_inverts[index];
+				uint32_t rid_id = manifold_materials[index];
+				if (p_materials.has(rid_id)) {
+					Ref<Material> material = p_materials[rid_id];
+					int32_t material_id = materials.find(material);
+					if (material_id != -1) {
+						face.material = material_id;
+					} else {
+						face.material = materials.size();
+						materials.push_back(material);
+					}
+					continue;
+				}
+				int32_t material_id = materials.find(default_material);
+				if (material_id != -1) {
+					face.material = material_id;
+				} else {
+					face.material = materials.size();
+					materials.push_back(default_material);
+				}
+			}
+		}
+
+		_regen_face_aabbs();
+	}
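// --- Illustrative sketch (editorial aside, not part of the patch) ---------
// The material round-trip above relies on Manifold's originalID mechanism:
// each material run reserves a fresh ID, stamps it both in runOriginalID and
// in every vertex's MATERIAL float, and the float survives the Boolean, so
// rounding it back to an int recovers the key into the material HashMap.
// In outline, using only calls already present in this patch:
//
//	uint32_t id = manifold::Manifold::ReserveIDs(1); // one ID per material
//	manifold::MeshGL mesh;
//	mesh.runOriginalID.push_back(id);
//	// ...fill mesh.triVerts / mesh.vertProperties, stamping float(id) per vertex...
//	manifold::Manifold shape = manifold::Manifold(mesh).Boolean(other, manifold::OpType::Add);
//	// shape.GetMeshGL() then yields vertices whose MATERIAL property still equals id.
// ---------------------------------------------------------------------------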
+
+	class MaterialIndexComparator {
+	public:
+		bool operator()(const CSGBrush::Face &a, const CSGBrush::Face &b) const {
+			return a.material < b.material;
+		}
+	};
+
+	// Create a brush from faces.
+	void build_from_faces(const Vector<Vector3> &p_vertices, const Vector<Vector2> &p_uvs, const Vector<bool> &p_smooth, const Vector<Ref<Material>> &p_materials, const Vector<bool> &p_invert_faces);
+	void copy_from(const CSGBrush &p_brush, const Transform3D &p_xform, float p_snap, HashMap<int32_t, Ref<Material>> &r_mesh_materials);
+};
+
+inline bool is_point_in_triangle(const Vector3 &p_point, const Vector3 p_vertices[3], int p_shifted = 0) {
+	real_t det = p_vertices[0].dot(p_vertices[1].cross(p_vertices[2]));
+
+	// If the determinant is zero, try shifting the triangle and the point.
+	if (Math::is_zero_approx(det)) {
+		if (p_shifted > 2) {
+			// Triangle appears degenerate, so ignore it.
+			return false;
+		}
+		Vector3 shift_by;
+		shift_by[p_shifted] = 1;
+		Vector3 shifted_point = p_point + shift_by;
+		Vector3 shifted_vertices[3] = { p_vertices[0] + shift_by, p_vertices[1] + shift_by, p_vertices[2] + shift_by };
+		return is_point_in_triangle(shifted_point, shifted_vertices, p_shifted + 1);
+	}
+
+	// Find the barycentric coordinates of the point with respect to the vertices.
+	real_t lambda[3];
+	lambda[0] = p_vertices[1].cross(p_vertices[2]).dot(p_point) / det;
+	lambda[1] = p_vertices[2].cross(p_vertices[0]).dot(p_point) / det;
+	lambda[2] = p_vertices[0].cross(p_vertices[1]).dot(p_point) / det;
+
+	// Point is in the plane if all lambdas sum to 1.
+	if (!Math::is_equal_approx(lambda[0] + lambda[1] + lambda[2], 1)) {
+		return false;
+	}
+
+	// Point is inside the triangle if all lambdas are positive.
+	if (lambda[0] < 0 || lambda[1] < 0 || lambda[2] < 0) {
+		return false;
+	}
+
+	return true;
+}
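// Illustrative sketch (editorial aside, not part of the patch): a standalone
// check of the barycentric test above. For the triangle (1,0,0), (0,1,0),
// (0,0,1), the centroid has lambda = (1/3, 1/3, 1/3): the lambdas sum to 1
// (coplanar) and are all positive (inside); det is the scalar triple
// product, which is 1 for this triangle.
namespace barycentric_example {
struct V3 { double x, y, z; };
inline V3 cross(const V3 &a, const V3 &b) { return { a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x }; }
inline double dot(const V3 &a, const V3 &b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
inline bool centroid_is_inside() {
	V3 v[3] = { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 } };
	V3 p = { 1.0 / 3, 1.0 / 3, 1.0 / 3 }; // centroid of the triangle
	double det = dot(v[0], cross(v[1], v[2])); // 1.0 here
	double l0 = dot(cross(v[1], v[2]), p) / det; // 1/3
	double l1 = dot(cross(v[2], v[0]), p) / det; // 1/3
	double l2 = dot(cross(v[0], v[1]), p) / det; // 1/3
	double sum = l0 + l1 + l2;
	return l0 > 0 && l1 > 0 && l2 > 0 && sum > 1.0 - 1e-9 && sum < 1.0 + 1e-9;
}
} // namespace barycentric_example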
+
+// CSGBrush
+
+void CSGBrush::build_from_faces(const Vector<Vector3> &p_vertices, const Vector<Vector2> &p_uvs, const Vector<bool> &p_smooth, const Vector<Ref<Material>> &p_materials, const Vector<bool> &p_flip_faces) {
+	faces.clear();
+
+	int vc = p_vertices.size();
+
+	ERR_FAIL_COND((vc % 3) != 0);
+
+	const Vector3 *rv = p_vertices.ptr();
+	int uvc = p_uvs.size();
+	const Vector2 *ruv = p_uvs.ptr();
+	int sc = p_smooth.size();
+	const bool *rs = p_smooth.ptr();
+	int mc = p_materials.size();
+	const Ref<Material> *rm = p_materials.ptr();
+	int ic = p_flip_faces.size();
+	const bool *ri = p_flip_faces.ptr();
+
+	HashMap<Ref<Material>, int> material_map;
+
+	faces.resize(p_vertices.size() / 3);
+
+	for (int i = 0; i < faces.size(); i++) {
+		Face &f = faces.write[i];
+		f.vertices[0] = rv[i * 3 + 0];
+		f.vertices[1] = rv[i * 3 + 1];
+		f.vertices[2] = rv[i * 3 + 2];
+
+		if (uvc == vc) {
+			f.uvs[0] = ruv[i * 3 + 0];
+			f.uvs[1] = ruv[i * 3 + 1];
+			f.uvs[2] = ruv[i * 3 + 2];
+		}
+
+		if (sc == vc / 3) {
+			f.smooth = rs[i];
+		} else {
+			f.smooth = false;
+		}
+
+		if (ic == vc / 3) {
+			f.invert = ri[i];
+		} else {
+			f.invert = false;
+		}
+
+		if (mc == vc / 3) {
+			Ref<Material> mat = rm[i];
+			if (mat.is_valid()) {
+				HashMap<Ref<Material>, int>::ConstIterator E = material_map.find(mat);
+
+				if (E) {
+					f.material = E->value;
+				} else {
+					f.material = material_map.size();
+					material_map[mat] = f.material;
+				}
+
+			} else {
+				f.material = -1;
+			}
+		}
+	}
+
+	materials.resize(material_map.size());
+	for (const KeyValue<Ref<Material>, int> &E : material_map) {
+		materials.write[E.value] = E.key;
+	}
+
+	_regen_face_aabbs();
+}
+
+void CSGBrush::copy_from(const CSGBrush &p_brush, const Transform3D &p_xform, float p_snap, HashMap<int32_t, Ref<Material>> &r_mesh_materials) {
+	faces = p_brush.faces;
+	materials = p_brush.materials;
+
+	for (int i = 0; i < faces.size(); i++) {
+		for (int j = 0; j < 3; j++) {
+			faces.write[i].vertices[j] = p_xform.xform(p_brush.faces[i].vertices[j]);
+		}
+	}
+
+	_regen_face_aabbs();
+	pack_manifold(p_snap, r_mesh_materials);
+}
+
 void CSGShape3D::set_use_collision(bool p_enable) {
 	if (use_collision == p_enable) {
 		return;
@@ -170,6 +470,7 @@ CSGBrush *CSGShape3D::_get_brush() {
 		brush = nullptr;
 
 		CSGBrush *n = _build_brush();
+		HashMap<int32_t, Ref<Material>> mesh_materials;
 
 		for (int i = 0; i < get_child_count(); i++) {
 			CSGShape3D *child = Object::cast_to<CSGShape3D>(get_child(i));
@@ -186,29 +487,31 @@ CSGBrush *CSGShape3D::_get_brush() {
 			}
 
 			if (!n) {
 				n = memnew(CSGBrush);
+				n->manifold = manifold::Manifold();
+				mesh_materials.clear();
-				n->copy_from(*n2, child->get_transform());
+				n->copy_from(*n2, child->get_transform(), snap, mesh_materials);
 			} else {
+				n->pack_manifold(snap, mesh_materials);
 				CSGBrush *nn = memnew(CSGBrush);
 				CSGBrush *nn2 = memnew(CSGBrush);
-				nn2->copy_from(*n2, child->get_transform());
-
-				CSGBrushOperation bop;
-
+				manifold::Manifold manifold_nn2;
+				nn2->copy_from(*n2, child->get_transform(), snap, mesh_materials);
 				switch (child->get_operation()) {
 					case CSGShape3D::OPERATION_UNION:
-						bop.merge_brushes(CSGBrushOperation::OPERATION_UNION, *n, *nn2, *nn, snap);
+						nn->manifold = n->manifold.Boolean(nn2->manifold, manifold::OpType::Add);
 						break;
 					case CSGShape3D::OPERATION_INTERSECTION:
-						bop.merge_brushes(CSGBrushOperation::OPERATION_INTERSECTION, *n, *nn2, *nn, snap);
+						nn->manifold = n->manifold.Boolean(nn2->manifold, manifold::OpType::Intersect);
 						break;
					case CSGShape3D::OPERATION_SUBTRACTION:
-						bop.merge_brushes(CSGBrushOperation::OPERATION_SUBTRACTION, *n, *nn2, *nn, snap);
+						nn->manifold =
n->manifold.Boolean(nn2->manifold, manifold::OpType::Subtract); break; } memdelete(n); memdelete(nn2); + nn->unpack_manifold(mesh_materials); n = nn; } } @@ -230,6 +533,7 @@ CSGBrush *CSGShape3D::_get_brush() { } brush = n; + brush->pack_manifold(snap, mesh_materials); dirty = false; } diff --git a/modules/csg/csg_shape.h b/modules/csg/csg_shape.h index bb7c8be43155..501c42eaba7f 100644 --- a/modules/csg/csg_shape.h +++ b/modules/csg/csg_shape.h @@ -31,14 +31,13 @@ #ifndef CSG_SHAPE_H #define CSG_SHAPE_H -#include "csg.h" - #include "scene/3d/path_3d.h" #include "scene/3d/visual_instance_3d.h" #include "scene/resources/3d/concave_polygon_shape_3d.h" #include "thirdparty/misc/mikktspace.h" +struct CSGBrush; class CSGShape3D : public GeometryInstance3D { GDCLASS(CSGShape3D, GeometryInstance3D); diff --git a/thirdparty/manifold/.gitrepo b/thirdparty/manifold/.gitrepo new file mode 100644 index 000000000000..1b57ab9f6248 --- /dev/null +++ b/thirdparty/manifold/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/ingydotnet/git-subrepo#readme +; +[subrepo] + remote = https://github.com/elalish/manifold.git + branch = master + commit = 53015422549ef9f2d8bca24d4fc47d6c3e80ece0 + parent = 1f00d4751e9b895b191a08a830c436dc4f7b2372 + method = merge + cmdver = 0.4.6 diff --git a/thirdparty/manifold/AUTHORS b/thirdparty/manifold/AUTHORS new file mode 100644 index 000000000000..a3e3c587f416 --- /dev/null +++ b/thirdparty/manifold/AUTHORS @@ -0,0 +1,10 @@ +# This is the list of Manifold's significant contributors. +# +# This does not necessarily list everyone who has contributed code, +# especially since many employees of one corporation may be contributing. +# To see the full list of contributors, see the revision history in +# source control. +Emmett Lalish +Chun Kit LAM +Geoff deRosenroll +Google LLC diff --git a/thirdparty/manifold/LICENSE b/thirdparty/manifold/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/thirdparty/manifold/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/thirdparty/manifold/src/collider/include/collider.h b/thirdparty/manifold/src/collider/include/collider.h new file mode 100644 index 000000000000..8d750815c95e --- /dev/null +++ b/thirdparty/manifold/src/collider/include/collider.h @@ -0,0 +1,46 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
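// --- Illustrative sketch (editorial aside, not part of the patch) ---------
// The Collider declared below packs its BVH into one flat array, with leaves
// at even node indices, internal nodes at odd indices, and the root at node
// 1, so a tree with N leaves needs exactly 2N - 1 slots. These mirror the
// index helpers defined in collider.cpp:
//
//	bool IsLeaf(int node) { return node % 2 == 0; }
//	int Leaf2Node(int leaf) { return leaf * 2; }                  // leaf 3 -> node 6
//	int Internal2Node(int internal) { return internal * 2 + 1; }  // internal 3 -> node 7
//	int Node2Leaf(int node) { return node / 2; }                  // inverse of Leaf2Node
//	int Node2Internal(int node) { return (node - 1) / 2; }        // inverse of Internal2Node
// ---------------------------------------------------------------------------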
+ +#pragma once +#include "public.h" +#include "sparse.h" +#include "vec.h" + +namespace manifold { + +/** @ingroup Private */ +class Collider { + public: + Collider() {} + Collider(const VecView& leafBB, + const VecView& leafMorton); + bool Transform(glm::mat4x3); + void UpdateBoxes(const VecView& leafBB); + template + SparseIndices Collisions(const VecView& queriesIn) const; + + private: + Vec nodeBBox_; + Vec nodeParent_; + // even nodes are leaves, odd nodes are internal, root is 1 + Vec> internalChildren_; + + int NumInternal() const { return internalChildren_.size(); }; + int NumLeaves() const { + return internalChildren_.empty() ? 0 : (NumInternal() + 1); + }; +}; + +} // namespace manifold diff --git a/thirdparty/manifold/src/collider/src/collider.cpp b/thirdparty/manifold/src/collider/src/collider.cpp new file mode 100644 index 000000000000..ca32f653d94f --- /dev/null +++ b/thirdparty/manifold/src/collider/src/collider.cpp @@ -0,0 +1,395 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "collider.h" + +#include "par.h" +#include "utils.h" + +#ifdef _MSC_VER +#include +#endif + +// Adjustable parameters +constexpr int kInitialLength = 128; +constexpr int kLengthMultiple = 4; +constexpr int kSequentialThreshold = 512; +// Fundamental constants +constexpr int kRoot = 1; + +#ifdef _MSC_VER + +#ifndef _WINDEF_ +typedef unsigned long DWORD; +#endif + +uint32_t __inline ctz(uint32_t value) { + DWORD trailing_zero = 0; + + if (_BitScanForward(&trailing_zero, value)) { + return trailing_zero; + } else { + // This is undefined, I better choose 32 than 0 + return 32; + } +} + +uint32_t __inline clz(uint32_t value) { + DWORD leading_zero = 0; + + if (_BitScanReverse(&leading_zero, value)) { + return 31 - leading_zero; + } else { + // Same remarks as above + return 32; + } +} +#endif + +namespace { +using namespace manifold; + +bool IsLeaf(int node) { return node % 2 == 0; } +bool IsInternal(int node) { return node % 2 == 1; } +int Node2Internal(int node) { return (node - 1) / 2; } +int Internal2Node(int internal) { return internal * 2 + 1; } +int Node2Leaf(int node) { return node / 2; } +int Leaf2Node(int leaf) { return leaf * 2; } + +struct CreateRadixTree { + VecView nodeParent_; + VecView> internalChildren_; + const VecView leafMorton_; + + int PrefixLength(uint32_t a, uint32_t b) const { +// count-leading-zeros is used to find the number of identical highest-order +// bits +#ifdef _MSC_VER + // return __lzcnt(a ^ b); + return clz(a ^ b); +#else + return __builtin_clz(a ^ b); +#endif + } + + int PrefixLength(int i, int j) const { + if (j < 0 || j >= leafMorton_.size()) { + return -1; + } else { + int out; + if (leafMorton_[i] == leafMorton_[j]) + // use index to disambiguate + out = 32 + + PrefixLength(static_cast(i), static_cast(j)); + else + out = PrefixLength(leafMorton_[i], leafMorton_[j]); + return out; + } + } + + int RangeEnd(int i) const { + // Determine direction of range (+1 or -1) + int dir = PrefixLength(i, i + 1) - 
PrefixLength(i, i - 1); + dir = (dir > 0) - (dir < 0); + // Compute conservative range length with exponential increase + int commonPrefix = PrefixLength(i, i - dir); + int max_length = kInitialLength; + while (PrefixLength(i, i + dir * max_length) > commonPrefix) + max_length *= kLengthMultiple; + // Compute precise range length with binary search + int length = 0; + for (int step = max_length / 2; step > 0; step /= 2) { + if (PrefixLength(i, i + dir * (length + step)) > commonPrefix) + length += step; + } + return i + dir * length; + } + + int FindSplit(int first, int last) const { + int commonPrefix = PrefixLength(first, last); + // Find the furthest object that shares more than commonPrefix bits with the + // first one, using binary search. + int split = first; + int step = last - first; + do { + step = (step + 1) >> 1; // divide by 2, rounding up + int newSplit = split + step; + if (newSplit < last) { + int splitPrefix = PrefixLength(first, newSplit); + if (splitPrefix > commonPrefix) split = newSplit; + } + } while (step > 1); + return split; + } + + void operator()(int internal) { + int first = internal; + // Find the range of objects with a common prefix + int last = RangeEnd(first); + if (first > last) thrust::swap(first, last); + // Determine where the next-highest difference occurs + int split = FindSplit(first, last); + int child1 = split == first ? Leaf2Node(split) : Internal2Node(split); + ++split; + int child2 = split == last ? Leaf2Node(split) : Internal2Node(split); + // Record parent_child relationships. + internalChildren_[internal].first = child1; + internalChildren_[internal].second = child2; + int node = Internal2Node(internal); + nodeParent_[child1] = node; + nodeParent_[child2] = node; + } +}; + +template +struct FindCollisions { + VecView nodeBBox_; + VecView> internalChildren_; + Recorder recorder; + + int RecordCollision(int node, thrust::tuple& query) { + const T& queryObj = thrust::get<0>(query); + const int queryIdx = thrust::get<1>(query); + + bool overlaps = nodeBBox_[node].DoesOverlap(queryObj); + if (overlaps && IsLeaf(node)) { + int leafIdx = Node2Leaf(node); + if (!selfCollision || leafIdx != queryIdx) { + recorder.record(queryIdx, leafIdx); + } + } + return overlaps && IsInternal(node); // Should traverse into node + } + + void operator()(thrust::tuple query) { + // stack cannot overflow because radix tree has max depth 30 (Morton code) + + // 32 (index). + int stack[64]; + int top = -1; + // Depth-first search + int node = kRoot; + const int queryIdx = thrust::get<1>(query); + // same implies that this query do not have any collision + if (recorder.earlyexit(queryIdx)) return; + while (1) { + int internal = Node2Internal(node); + int child1 = internalChildren_[internal].first; + int child2 = internalChildren_[internal].second; + + int traverse1 = RecordCollision(child1, query); + int traverse2 = RecordCollision(child2, query); + + if (!traverse1 && !traverse2) { + if (top < 0) break; // done + node = stack[top--]; // get a saved node + } else { + node = traverse1 ? 
child1 : child2; // go here next + if (traverse1 && traverse2) { + stack[++top] = child2; // save the other for later + } + } + } + recorder.end(queryIdx); + } +}; + +struct CountCollisions { + VecView counts; + VecView empty; + void record(int queryIdx, int _leafIdx) { counts[queryIdx]++; } + bool earlyexit(int _queryIdx) { return false; } + void end(int queryIdx) { + if (counts[queryIdx] == 0) empty[queryIdx] = 1; + } +}; + +template +struct SeqCollisionRecorder { + SparseIndices& queryTri_; + void record(int queryIdx, int leafIdx) const { + if (inverted) + queryTri_.Add(leafIdx, queryIdx); + else + queryTri_.Add(queryIdx, leafIdx); + } + bool earlyexit(int queryIdx) const { return false; } + void end(int queryIdx) const {} +}; + +template +struct ParCollisionRecorder { + SparseIndices& queryTri; + VecView counts; + VecView empty; + void record(int queryIdx, int leafIdx) { + int pos = counts[queryIdx]++; + if (inverted) + queryTri.Set(pos, leafIdx, queryIdx); + else + queryTri.Set(pos, queryIdx, leafIdx); + } + bool earlyexit(int queryIdx) const { return empty[queryIdx] == 1; } + void end(int queryIdx) const {} +}; + +struct BuildInternalBoxes { + VecView nodeBBox_; + VecView counter_; + const VecView nodeParent_; + const VecView> internalChildren_; + + void operator()(int leaf) { + int node = Leaf2Node(leaf); + do { + node = nodeParent_[node]; + int internal = Node2Internal(node); + if (AtomicAdd(counter_[internal], 1) == 0) return; + nodeBBox_[node] = nodeBBox_[internalChildren_[internal].first].Union( + nodeBBox_[internalChildren_[internal].second]); + } while (node != kRoot); + } +}; + +struct TransformBox { + const glm::mat4x3 transform; + void operator()(Box& box) { box = box.Transform(transform); } +}; +} // namespace + +namespace manifold { + +/** + * Creates a Bounding Volume Hierarchy (BVH) from an input set of axis-aligned + * bounding boxes and corresponding Morton codes. It is assumed these vectors + * are already sorted by increasing Morton code. + */ +Collider::Collider(const VecView& leafBB, + const VecView& leafMorton) { + ZoneScoped; + ASSERT(leafBB.size() == leafMorton.size(), userErr, + "vectors must be the same length"); + int num_nodes = 2 * leafBB.size() - 1; + // assign and allocate members + nodeBBox_.resize(num_nodes); + nodeParent_.resize(num_nodes, -1); + internalChildren_.resize(leafBB.size() - 1, thrust::make_pair(-1, -1)); + // organize tree + for_each_n(autoPolicy(NumInternal()), countAt(0), NumInternal(), + CreateRadixTree({nodeParent_, internalChildren_, leafMorton})); + UpdateBoxes(leafBB); +} + +/** + * For a vector of query objects, this returns a sparse array of overlaps + * between the queries and the bounding boxes of the collider. Queries are + * normally axis-aligned bounding boxes. Points can also be used, and this case + * overlaps are defined as lying in the XY projection of the bounding box. If + * the query vector is the leaf vector, set selfCollision to true, which will + * then not report any collisions between an index and itself. 
+ */ +template +SparseIndices Collider::Collisions(const VecView& queriesIn) const { + ZoneScoped; + // note that the length is 1 larger than the number of queries so the last + // element can store the sum when using exclusive scan + if (queriesIn.size() < kSequentialThreshold) { + SparseIndices queryTri; + for_each_n(ExecutionPolicy::Seq, zip(queriesIn.cbegin(), countAt(0)), + queriesIn.size(), + FindCollisions>{ + nodeBBox_, internalChildren_, {queryTri}}); + return queryTri; + } else { + // compute the number of collisions to determine the size for allocation and + // offset, this avoids the need for atomic + Vec counts(queriesIn.size() + 1, 0); + Vec empty(queriesIn.size(), 0); + for_each_n(ExecutionPolicy::Par, zip(queriesIn.cbegin(), countAt(0)), + queriesIn.size(), + FindCollisions{ + nodeBBox_, internalChildren_, {counts, empty}}); + // compute start index for each query and total count + exclusive_scan(ExecutionPolicy::Par, counts.begin(), counts.end(), + counts.begin(), 0, std::plus()); + if (counts.back() == 0) return SparseIndices(0); + SparseIndices queryTri(counts.back()); + // actually recording collisions + for_each_n(ExecutionPolicy::Par, zip(queriesIn.cbegin(), countAt(0)), + queriesIn.size(), + FindCollisions>{ + nodeBBox_, internalChildren_, {queryTri, counts, empty}}); + return queryTri; + } +} + +/** + * Recalculate the collider's internal bounding boxes without changing the + * hierarchy. + */ +void Collider::UpdateBoxes(const VecView& leafBB) { + ZoneScoped; + ASSERT(leafBB.size() == NumLeaves(), userErr, + "must have the same number of updated boxes as original"); + // copy in leaf node Boxes + strided_range::Iter> leaves(nodeBBox_.begin(), nodeBBox_.end(), 2); + auto policy = autoPolicy(NumInternal()); + copy(policy, leafBB.cbegin(), leafBB.cend(), leaves.begin()); + // create global counters + Vec counter(NumInternal(), 0); + // kernel over leaves to save internal Boxes + for_each_n( + policy, countAt(0), NumLeaves(), + BuildInternalBoxes({nodeBBox_, counter, nodeParent_, internalChildren_})); +} + +/** + * Apply axis-aligned transform to all bounding boxes. If transform is not + * axis-aligned, abort and return false to indicate recalculation is necessary. + */ +bool Collider::Transform(glm::mat4x3 transform) { + ZoneScoped; + bool axisAligned = true; + for (int row : {0, 1, 2}) { + int count = 0; + for (int col : {0, 1, 2}) { + if (transform[col][row] == 0.0f) ++count; + } + if (count != 2) axisAligned = false; + } + if (axisAligned) { + for_each(autoPolicy(nodeBBox_.size()), nodeBBox_.begin(), nodeBBox_.end(), + TransformBox({transform})); + } + return axisAligned; +} + +template SparseIndices Collider::Collisions( + const VecView&) const; + +template SparseIndices Collider::Collisions( + const VecView&) const; + +template SparseIndices Collider::Collisions( + const VecView&) const; + +template SparseIndices Collider::Collisions( + const VecView&) const; + +template SparseIndices Collider::Collisions( + const VecView&) const; + +template SparseIndices Collider::Collisions( + const VecView&) const; + +} // namespace manifold diff --git a/thirdparty/manifold/src/cross_section/include/cross_section.h b/thirdparty/manifold/src/cross_section/include/cross_section.h new file mode 100644 index 000000000000..9e15bb5297f8 --- /dev/null +++ b/thirdparty/manifold/src/cross_section/include/cross_section.h @@ -0,0 +1,176 @@ +// Copyright 2023 The Manifold Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "glm/ext/matrix_float3x2.hpp" +#include "glm/ext/vector_float2.hpp" +#include "public.h" +#include "vec_view.h" + +namespace manifold { + +/** @addtogroup Core + * @{ + */ + +struct PathImpl; + +/** + * Two-dimensional cross sections guaranteed to be without self-intersections, + * or overlaps between polygons (from construction onwards). This class makes + * use of the [Clipper2](http://www.angusj.com/clipper2/Docs/Overview.htm) + * library for polygon clipping (boolean) and offsetting operations. + */ +class CrossSection { + public: + /** @name Creation + * Constructors + */ + ///@{ + + CrossSection(); + ~CrossSection(); + + CrossSection(const CrossSection& other); + CrossSection& operator=(const CrossSection& other); + CrossSection(CrossSection&&) noexcept; + CrossSection& operator=(CrossSection&&) noexcept; + + // Adapted from Clipper2 docs: + // http://www.angusj.com/clipper2/Docs/Units/Clipper/Types/FillRule.htm + // (Copyright © 2010-2023 Angus Johnson) + /** + * Filling rules defining which polygon sub-regions are considered to be + * inside a given polygon, and which sub-regions will not (based on winding + * numbers). See the [Clipper2 + * docs](http://www.angusj.com/clipper2/Docs/Units/Clipper/Types/FillRule.htm) + * for a detailed explaination with illusrations. + */ + enum class FillRule { + EvenOdd, ///< Only odd numbered sub-regions are filled. + NonZero, ///< Only non-zero sub-regions are filled. + Positive, ///< Only sub-regions with winding counts > 0 are filled. + Negative ///< Only sub-regions with winding counts < 0 are filled. + }; + + CrossSection(const SimplePolygon& contour, + FillRule fillrule = FillRule::Positive); + CrossSection(const Polygons& contours, + FillRule fillrule = FillRule::Positive); + CrossSection(const Rect& rect); + static CrossSection Square(const glm::vec2 dims, bool center = false); + static CrossSection Circle(float radius, int circularSegments = 0); + ///@} + + /** @name Information + * Details of the cross-section + */ + ///@{ + double Area() const; + int NumVert() const; + int NumContour() const; + bool IsEmpty() const; + Rect Bounds() const; + ///@} + + /** @name Modification + */ + ///@{ + CrossSection Translate(const glm::vec2 v) const; + CrossSection Rotate(float degrees) const; + CrossSection Scale(const glm::vec2 s) const; + CrossSection Mirror(const glm::vec2 ax) const; + CrossSection Transform(const glm::mat3x2& m) const; + CrossSection Warp(std::function warpFunc) const; + CrossSection WarpBatch( + std::function)> warpFunc) const; + CrossSection Simplify(double epsilon = 1e-6) const; + + // Adapted from Clipper2 docs: + // http://www.angusj.com/clipper2/Docs/Units/Clipper/Types/JoinType.htm + // (Copyright © 2010-2023 Angus Johnson) + /** + * Specifies the treatment of path/contour joins (corners) when offseting + * CrossSections. 
See the [Clipper2 + * doc](http://www.angusj.com/clipper2/Docs/Units/Clipper/Types/JoinType.htm) + * for illustrations. + */ + enum class JoinType { + Square, /*!< Squaring is applied uniformly at all joins where the internal + join angle is less that 90 degrees. The squared edge will be at + exactly the offset distance from the join vertex. */ + Round, /*!< Rounding is applied to all joins that have convex external + angles, and it maintains the exact offset distance from the join + vertex. */ + Miter /*!< There's a necessary limit to mitered joins (to avoid narrow + angled joins producing excessively long and narrow + [spikes](http://www.angusj.com/clipper2/Docs/Units/Clipper.Offset/Classes/ClipperOffset/Properties/MiterLimit.htm)). + So where mitered joins would exceed a given maximum miter distance + (relative to the offset distance), these are 'squared' instead. */ + }; + + CrossSection Offset(double delta, JoinType jt, double miter_limit = 2.0, + int circularSegments = 0) const; + ///@} + + /** @name Boolean + * Combine two manifolds + */ + ///@{ + CrossSection Boolean(const CrossSection& second, OpType op) const; + static CrossSection BatchBoolean( + const std::vector& crossSections, OpType op); + CrossSection operator+(const CrossSection&) const; + CrossSection& operator+=(const CrossSection&); + CrossSection operator-(const CrossSection&) const; + CrossSection& operator-=(const CrossSection&); + CrossSection operator^(const CrossSection&) const; + CrossSection& operator^=(const CrossSection&); + ///@} + + /** @name Topological + */ + ///@{ + static CrossSection Compose(std::vector&); + std::vector Decompose() const; + ///@} + + /** @name Convex Hulling + */ + ///@{ + CrossSection Hull() const; + static CrossSection Hull(const std::vector& crossSections); + static CrossSection Hull(const SimplePolygon poly); + static CrossSection Hull(const Polygons polys); + ///@} + /// + /** @name Conversion + */ + ///@{ + Polygons ToPolygons() const; + ///@} + + private: + mutable std::shared_ptr paths_; + mutable glm::mat3x2 transform_ = glm::mat3x2(1.0f); + CrossSection(std::shared_ptr paths); + std::shared_ptr GetPaths() const; +}; +/** @} */ +} // namespace manifold diff --git a/thirdparty/manifold/src/cross_section/src/cross_section.cpp b/thirdparty/manifold/src/cross_section/src/cross_section.cpp new file mode 100644 index 000000000000..a7bb15c29d59 --- /dev/null +++ b/thirdparty/manifold/src/cross_section/src/cross_section.cpp @@ -0,0 +1,795 @@ +// Copyright 2023 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
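// --- Illustrative sketch (editorial aside, not part of the patch; uses only
// the public API declared in cross_section.h above) ------------------------
// 2D cross-sections compose with booleans the same way the CSG module
// composes 3D manifolds; for example, a washer-shaped profile:
//
//	manifold::CrossSection washer() {
//		manifold::CrossSection outer = manifold::CrossSection::Circle(2.0f, 64);
//		manifold::CrossSection inner = manifold::CrossSection::Circle(1.0f, 64);
//		return outer - inner; // operator- forwards to Boolean(..., OpType::Subtract)
//	}
// ---------------------------------------------------------------------------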
+ +#include "cross_section.h" + +#include "clipper2/clipper.core.h" +#include "clipper2/clipper.h" +#include "clipper2/clipper.offset.h" + +namespace C2 = Clipper2Lib; + +using namespace manifold; + +namespace manifold { +struct PathImpl { + PathImpl(const C2::PathsD paths_) : paths_(paths_) {} + operator const C2::PathsD&() const { return paths_; } + const C2::PathsD paths_; +}; +} // namespace manifold + +namespace { +const int precision_ = 8; + +C2::ClipType cliptype_of_op(OpType op) { + C2::ClipType ct = C2::ClipType::Union; + switch (op) { + case OpType::Add: + break; + case OpType::Subtract: + ct = C2::ClipType::Difference; + break; + case OpType::Intersect: + ct = C2::ClipType::Intersection; + break; + }; + return ct; +} + +C2::FillRule fr(CrossSection::FillRule fillrule) { + C2::FillRule fr = C2::FillRule::EvenOdd; + switch (fillrule) { + case CrossSection::FillRule::EvenOdd: + break; + case CrossSection::FillRule::NonZero: + fr = C2::FillRule::NonZero; + break; + case CrossSection::FillRule::Positive: + fr = C2::FillRule::Positive; + break; + case CrossSection::FillRule::Negative: + fr = C2::FillRule::Negative; + break; + }; + return fr; +} + +C2::JoinType jt(CrossSection::JoinType jointype) { + C2::JoinType jt = C2::JoinType::Square; + switch (jointype) { + case CrossSection::JoinType::Square: + break; + case CrossSection::JoinType::Round: + jt = C2::JoinType::Round; + break; + case CrossSection::JoinType::Miter: + jt = C2::JoinType::Miter; + break; + }; + return jt; +} + +glm::vec2 v2_of_pd(const C2::PointD p) { return {p.x, p.y}; } + +C2::PointD v2_to_pd(const glm::vec2 v) { return C2::PointD(v.x, v.y); } + +C2::PathD pathd_of_contour(const SimplePolygon& ctr) { + auto p = C2::PathD(); + p.reserve(ctr.size()); + for (auto v : ctr) { + p.push_back(v2_to_pd(v)); + } + return p; +} + +C2::PathsD transform(const C2::PathsD ps, const glm::mat3x2 m) { + const bool invert = glm::determinant(glm::mat2(m)) < 0; + auto transformed = C2::PathsD(); + transformed.reserve(ps.size()); + for (auto path : ps) { + auto sz = path.size(); + auto s = C2::PathD(sz); + for (int i = 0; i < sz; ++i) { + auto idx = invert ? 
sz - 1 - i : i; + s[idx] = v2_to_pd(m * glm::vec3(path[i].x, path[i].y, 1)); + } + transformed.push_back(s); + } + return transformed; +} + +std::shared_ptr shared_paths(const C2::PathsD& ps) { + return std::make_shared(ps); +} + +// forward declaration for mutual recursion +void decompose_hole(const C2::PolyTreeD* outline, + std::vector& polys, C2::PathsD& poly, + int n_holes, int j); + +void decompose_outline(const C2::PolyTreeD* tree, + std::vector& polys, int i) { + auto n_outlines = tree->Count(); + if (i < n_outlines) { + auto outline = tree->Child(i); + auto n_holes = outline->Count(); + auto poly = C2::PathsD(n_holes + 1); + poly[0] = outline->Polygon(); + decompose_hole(outline, polys, poly, n_holes, 0); + polys.push_back(poly); + if (i < n_outlines - 1) { + decompose_outline(tree, polys, i + 1); + } + } +} + +void decompose_hole(const C2::PolyTreeD* outline, + std::vector& polys, C2::PathsD& poly, + int n_holes, int j) { + if (j < n_holes) { + auto child = outline->Child(j); + decompose_outline(child, polys, 0); + poly[j + 1] = child->Polygon(); + decompose_hole(outline, polys, poly, n_holes, j + 1); + } +} + +void flatten(const C2::PolyTreeD* tree, C2::PathsD& polys, int i) { + auto n_outlines = tree->Count(); + if (i < n_outlines) { + auto outline = tree->Child(i); + flatten(outline, polys, 0); + polys.push_back(outline->Polygon()); + if (i < n_outlines - 1) { + flatten(tree, polys, i + 1); + } + } +} + +bool V2Lesser(glm::vec2 a, glm::vec2 b) { + if (a.x == b.x) return a.y < b.y; + return a.x < b.x; +} + +void HullBacktrack(const glm::vec2& pt, std::vector& stack) { + auto sz = stack.size(); + while (sz >= 2 && CCW(stack[sz - 2], stack[sz - 1], pt, 0.0f) <= 0.0f) { + stack.pop_back(); + sz = stack.size(); + } +} + +// Based on method described here: +// https://www.hackerearth.com/practice/math/geometry/line-sweep-technique/tutorial/ +// Changed to follow: +// https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain +// This is the same algorithm (Andrew, also called Montone Chain). +C2::PathD HullImpl(SimplePolygon& pts) { + int len = pts.size(); + if (len < 3) return C2::PathD(); // not enough points to create a polygon + std::sort(pts.begin(), pts.end(), V2Lesser); + + auto lower = std::vector{}; + for (int i = 0; i < len; i++) { + HullBacktrack(pts[i], lower); + lower.push_back(pts[i]); + } + auto upper = std::vector{}; + for (int i = len - 1; i >= 0; i--) { + HullBacktrack(pts[i], upper); + upper.push_back(pts[i]); + } + + upper.pop_back(); + lower.pop_back(); + + auto path = C2::PathD(lower.size() + upper.size()); + for (int i = 0; i < lower.size(); i++) { + path[i] = v2_to_pd(lower[i]); + } + auto llen = lower.size(); + int sz = upper.size(); // "fix" -Waggressive-loop-optimizations warning. + for (int i = 0; i < sz; i++) { + path[i + llen] = v2_to_pd(upper[i]); + } + return path; +} +} // namespace + +namespace manifold { + +/** + * The default constructor is an empty cross-section (containing no contours). + */ +CrossSection::CrossSection() { + paths_ = std::make_shared(C2::PathsD()); +} + +CrossSection::~CrossSection() = default; +CrossSection::CrossSection(CrossSection&&) noexcept = default; +CrossSection& CrossSection::operator=(CrossSection&&) noexcept = default; + +/** + * The copy constructor avoids copying the underlying paths vector (sharing + * with its parent via shared_ptr), however subsequent transformations, and + * their application will not be shared. 
+ * It is generally recommended to avoid
+ * this, opting instead to simply create CrossSections with the available
+ * const methods.
+ */
+CrossSection::CrossSection(const CrossSection& other) {
+  paths_ = other.paths_;
+  transform_ = other.transform_;
+}
+
+CrossSection& CrossSection::operator=(const CrossSection& other) {
+  if (this != &other) {
+    paths_ = other.paths_;
+    transform_ = other.transform_;
+  }
+  return *this;
+}
+
+// Private, skips unioning.
+CrossSection::CrossSection(std::shared_ptr<const PathImpl> ps) { paths_ = ps; }
+
+/**
+ * Create a 2d cross-section from a single contour. A boolean union operation
+ * (with Positive filling rule by default) is performed to ensure the
+ * resulting CrossSection is free of self-intersections.
+ *
+ * @param contour A closed path outlining the desired cross-section.
+ * @param fillrule The filling rule used to interpret polygon sub-regions
+ * created by self-intersections in contour.
+ */
+CrossSection::CrossSection(const SimplePolygon& contour, FillRule fillrule) {
+  auto ps = C2::PathsD{(pathd_of_contour(contour))};
+  paths_ = shared_paths(C2::Union(ps, fr(fillrule), precision_));
+}
+
+/**
+ * Create a 2d cross-section from a set of contours (complex polygons). A
+ * boolean union operation (with Positive filling rule by default) is
+ * performed to combine overlapping polygons and ensure the resulting
+ * CrossSection is free of intersections.
+ *
+ * @param contours A set of closed paths describing zero or more complex
+ * polygons.
+ * @param fillrule The filling rule used to interpret polygon sub-regions in
+ * contours.
+ */
+CrossSection::CrossSection(const Polygons& contours, FillRule fillrule) {
+  auto ps = C2::PathsD();
+  ps.reserve(contours.size());
+  for (auto ctr : contours) {
+    ps.push_back(pathd_of_contour(ctr));
+  }
+  paths_ = shared_paths(C2::Union(ps, fr(fillrule), precision_));
+}
+
+/**
+ * Create a 2d cross-section from an axis-aligned rectangle (bounding box).
+ *
+ * @param rect An axis-aligned rectangular bounding box.
+ */
+CrossSection::CrossSection(const Rect& rect) {
+  C2::PathD p(4);
+  p[0] = C2::PointD(rect.min.x, rect.min.y);
+  p[1] = C2::PointD(rect.max.x, rect.min.y);
+  p[2] = C2::PointD(rect.max.x, rect.max.y);
+  p[3] = C2::PointD(rect.min.x, rect.max.y);
+  paths_ = shared_paths(C2::PathsD{p});
+}
+
+// Private.
+// All access to paths_ should be done through the GetPaths() method, which
+// applies the accumulated transform_.
+std::shared_ptr<const PathImpl> CrossSection::GetPaths() const {
+  if (transform_ == glm::mat3x2(1.0f)) {
+    return paths_;
+  }
+  paths_ = shared_paths(::transform(paths_->paths_, transform_));
+  transform_ = glm::mat3x2(1.0f);
+  return paths_;
+}
+
+/**
+ * Constructs a square with the given XY dimensions. By default it is
+ * positioned in the first quadrant, touching the origin. If any dimensions in
+ * size are negative, or if all are zero, an empty CrossSection will be
+ * returned.
+ *
+ * @param size The X and Y dimensions of the square.
+ * @param center Set to true to shift the center to the origin.
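+ *
+ * Illustrative sketch (editor's addition, not upstream documentation; it
+ * assumes only the constructor defined below):
+ *
+ *   using namespace manifold;
+ *   // A 2 x 1 rectangle centered on the origin; its vertices then span
+ *   // (-1, -0.5) to (1, 0.5). With center = false they would instead span
+ *   // (0, 0) to (2, 1).
+ *   CrossSection sq = CrossSection::Square(glm::vec2(2.0f, 1.0f), true);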
+ */
+CrossSection CrossSection::Square(const glm::vec2 size, bool center) {
+  if (size.x < 0.0f || size.y < 0.0f || glm::length(size) == 0.0f) {
+    return CrossSection();
+  }
+
+  auto p = C2::PathD(4);
+  if (center) {
+    const auto w = size.x / 2;
+    const auto h = size.y / 2;
+    p[0] = C2::PointD(w, h);
+    p[1] = C2::PointD(-w, h);
+    p[2] = C2::PointD(-w, -h);
+    p[3] = C2::PointD(w, -h);
+  } else {
+    const double x = size.x;
+    const double y = size.y;
+    p[0] = C2::PointD(0.0, 0.0);
+    p[1] = C2::PointD(x, 0.0);
+    p[2] = C2::PointD(x, y);
+    p[3] = C2::PointD(0.0, y);
+  }
+  return CrossSection(shared_paths(C2::PathsD{p}));
+}
+
+/**
+ * Constructs a circle of a given radius.
+ *
+ * @param radius Radius of the circle. Must be positive.
+ * @param circularSegments Number of segments along its diameter. Default is
+ * calculated by the static Quality defaults according to the radius.
+ */
+CrossSection CrossSection::Circle(float radius, int circularSegments) {
+  if (radius <= 0.0f) {
+    return CrossSection();
+  }
+  int n = circularSegments > 2 ? circularSegments
+                               : Quality::GetCircularSegments(radius);
+  float dPhi = 360.0f / n;
+  auto circle = C2::PathD(n);
+  for (int i = 0; i < n; ++i) {
+    circle[i] = C2::PointD(radius * cosd(dPhi * i), radius * sind(dPhi * i));
+  }
+  return CrossSection(shared_paths(C2::PathsD{circle}));
+}
+
+/**
+ * Perform the given boolean operation between this and another CrossSection.
+ */
+CrossSection CrossSection::Boolean(const CrossSection& second,
+                                   OpType op) const {
+  auto ct = cliptype_of_op(op);
+  auto res = C2::BooleanOp(ct, C2::FillRule::Positive, GetPaths()->paths_,
+                           second.GetPaths()->paths_, precision_);
+  return CrossSection(shared_paths(res));
+}
+
+/**
+ * Perform the given boolean operation on a list of CrossSections. In case of
+ * Subtract, all CrossSections in the tail are differenced from the head.
+ */
+CrossSection CrossSection::BatchBoolean(
+    const std::vector<CrossSection>& crossSections, OpType op) {
+  if (crossSections.size() == 0)
+    return CrossSection();
+  else if (crossSections.size() == 1)
+    return crossSections[0];
+
+  auto subjs = crossSections[0].GetPaths();
+  int n_clips = 0;
+  for (size_t i = 1; i < crossSections.size(); ++i) {
+    n_clips += crossSections[i].GetPaths()->paths_.size();
+  }
+  auto clips = C2::PathsD();
+  clips.reserve(n_clips);
+  for (size_t i = 1; i < crossSections.size(); ++i) {
+    auto ps = crossSections[i].GetPaths();
+    clips.insert(clips.end(), ps->paths_.begin(), ps->paths_.end());
+  }
+
+  auto ct = cliptype_of_op(op);
+  auto res = C2::BooleanOp(ct, C2::FillRule::Positive, subjs->paths_, clips,
+                           precision_);
+  return CrossSection(shared_paths(res));
+}
+
+/**
+ * Compute the boolean union between two cross-sections.
+ */
+CrossSection CrossSection::operator+(const CrossSection& Q) const {
+  return Boolean(Q, OpType::Add);
+}
+
+/**
+ * Compute the boolean union between two cross-sections, assigning the result
+ * to the first.
+ */
+CrossSection& CrossSection::operator+=(const CrossSection& Q) {
+  *this = *this + Q;
+  return *this;
+}
+
+/**
+ * Compute the boolean difference of a (clip) cross-section from another
+ * (subject).
+ */
+CrossSection CrossSection::operator-(const CrossSection& Q) const {
+  return Boolean(Q, OpType::Subtract);
+}
+
+/**
+ * Compute the boolean difference of a (clip) cross-section from another
+ * (subject), assigning the result to the subject.
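+ *
+ * Illustrative equivalence (editor's addition, following the operators
+ * defined above):
+ *
+ *   using namespace manifold;
+ *   CrossSection a = CrossSection::Square(glm::vec2(2.0f));
+ *   CrossSection b = CrossSection::Circle(0.5f);
+ *   a -= b;  // same as: a = a.Boolean(b, OpType::Subtract);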
+ */ +CrossSection& CrossSection::operator-=(const CrossSection& Q) { + *this = *this - Q; + return *this; +} + +/** + * Compute the boolean intersection between two cross-sections. + */ +CrossSection CrossSection::operator^(const CrossSection& Q) const { + return Boolean(Q, OpType::Intersect); +} + +/** + * Compute the boolean intersection between two cross-sections, assigning the + * result to the first. + */ +CrossSection& CrossSection::operator^=(const CrossSection& Q) { + *this = *this ^ Q; + return *this; +} + +/** + * Construct a CrossSection from a vector of other CrossSections (batch + * boolean union). + */ +CrossSection CrossSection::Compose(std::vector& crossSections) { + return BatchBoolean(crossSections, OpType::Add); +} + +/** + * This operation returns a vector of CrossSections that are topologically + * disconnected, each containing one outline contour with zero or more + * holes. + */ +std::vector CrossSection::Decompose() const { + if (NumContour() < 2) { + return std::vector{CrossSection(*this)}; + } + + C2::PolyTreeD tree; + C2::BooleanOp(C2::ClipType::Union, C2::FillRule::Positive, GetPaths()->paths_, + C2::PathsD(), tree, precision_); + + auto polys = std::vector(); + decompose_outline(&tree, polys, 0); + + auto n_polys = polys.size(); + auto comps = std::vector(n_polys); + // reverse the stack while wrapping + for (int i = 0; i < n_polys; ++i) { + comps[n_polys - i - 1] = CrossSection(shared_paths(polys[i])); + } + + return comps; +} + +/** + * Move this CrossSection in space. This operation can be chained. Transforms + * are combined and applied lazily. + * + * @param v The vector to add to every vertex. + */ +CrossSection CrossSection::Translate(const glm::vec2 v) const { + glm::mat3x2 m(1.0f, 0.0f, // + 0.0f, 1.0f, // + v.x, v.y); + return Transform(m); +} + +/** + * Applies a (Z-axis) rotation to the CrossSection, in degrees. This operation + * can be chained. Transforms are combined and applied lazily. + * + * @param degrees degrees about the Z-axis to rotate. + */ +CrossSection CrossSection::Rotate(float degrees) const { + auto s = sind(degrees); + auto c = cosd(degrees); + glm::mat3x2 m(c, s, // + -s, c, // + 0.0f, 0.0f); + return Transform(m); +} + +/** + * Scale this CrossSection in space. This operation can be chained. Transforms + * are combined and applied lazily. + * + * @param v The vector to multiply every vertex by per component. + */ +CrossSection CrossSection::Scale(const glm::vec2 scale) const { + glm::mat3x2 m(scale.x, 0.0f, // + 0.0f, scale.y, // + 0.0f, 0.0f); + return Transform(m); +} + +/** + * Mirror this CrossSection over the arbitrary axis described by the unit form + * of the given vector. If the length of the vector is zero, an empty + * CrossSection is returned. This operation can be chained. Transforms are + * combined and applied lazily. + * + * @param ax the axis to be mirrored over + */ +CrossSection CrossSection::Mirror(const glm::vec2 ax) const { + if (glm::length(ax) == 0.) { + return CrossSection(); + } + auto n = glm::normalize(glm::abs(ax)); + auto m = glm::mat3x2(glm::mat2(1.0f) - 2.0f * glm::outerProduct(n, n)); + return Transform(m); +} + +/** + * Transform this CrossSection in space. The first two columns form a 2x2 + * matrix transform and the last is a translation vector. This operation can + * be chained. Transforms are combined and applied lazily. + * + * @param m The affine transform matrix to apply to all the vertices. 
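+ *
+ * Illustrative sketch (editor's addition, not upstream documentation):
+ * because transforms are combined and applied lazily, the two
+ * cross-sections below share the same underlying paths until GetPaths()
+ * next applies the accumulated matrix.
+ *
+ *   using namespace manifold;
+ *   CrossSection c = CrossSection::Circle(1.0f);
+ *   // Column-major 2x2 linear part plus a translation column:
+ *   glm::mat3x2 shear(1.0f, 0.0f,   // X basis
+ *                     0.5f, 1.0f,   // Y basis (sheared)
+ *                     0.0f, 0.0f);  // translation
+ *   CrossSection sheared = c.Transform(shear);  // x' = x + 0.5*y, y' = y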
+ */
+CrossSection CrossSection::Transform(const glm::mat3x2& m) const {
+  auto transformed = CrossSection();
+  transformed.transform_ = m * glm::mat3(transform_);
+  transformed.paths_ = paths_;
+  return transformed;
+}
+
+/**
+ * Move the vertices of this CrossSection (creating a new one) according to
+ * any arbitrary input function, followed by a union operation (with a
+ * Positive fill rule) that ensures any introduced intersections are not
+ * included in the result.
+ *
+ * @param warpFunc A function that modifies a given vertex position.
+ */
+CrossSection CrossSection::Warp(
+    std::function<void(glm::vec2&)> warpFunc) const {
+  return WarpBatch([&warpFunc](VecView<glm::vec2> vecs) {
+    for (glm::vec2& p : vecs) {
+      warpFunc(p);
+    }
+  });
+}
+
+/**
+ * Same as CrossSection::Warp but calls warpFunc with
+ * a VecView<glm::vec2>, which is roughly equivalent to std::span<glm::vec2>,
+ * pointing to all vec2 elements to be modified in-place.
+ *
+ * @param warpFunc A function that modifies multiple vertex positions.
+ */
+CrossSection CrossSection::WarpBatch(
+    std::function<void(VecView<glm::vec2>)> warpFunc) const {
+  std::vector<glm::vec2> tmp_verts;
+  C2::PathsD paths = GetPaths()->paths_;  // deep copy
+  for (C2::PathD const& path : paths) {
+    for (C2::PointD const& p : path) {
+      tmp_verts.push_back(v2_of_pd(p));
+    }
+  }
+
+  warpFunc(VecView<glm::vec2>(tmp_verts.data(), tmp_verts.size()));
+
+  auto cursor = tmp_verts.begin();
+  for (C2::PathD& path : paths) {
+    for (C2::PointD& p : path) {
+      p = v2_to_pd(*cursor);
+      ++cursor;
+    }
+  }
+
+  return CrossSection(
+      shared_paths(C2::Union(paths, C2::FillRule::Positive, precision_)));
+}
+
+/**
+ * Remove vertices from the contours in this CrossSection that are less than
+ * the specified distance epsilon from an imaginary line that passes through
+ * its two adjacent vertices. Near-duplicate vertices and collinear points
+ * will be removed at lower epsilons, with elimination of line segments
+ * becoming increasingly aggressive with larger epsilons.
+ *
+ * It is recommended to apply this function following Offset, in order to
+ * clean up any spurious tiny line segments introduced that do not improve
+ * quality in any meaningful way. This is particularly important if further
+ * offsetting operations are to be performed, which would compound the issue.
+ */
+CrossSection CrossSection::Simplify(double epsilon) const {
+  C2::PolyTreeD tree;
+  C2::BooleanOp(C2::ClipType::Union, C2::FillRule::Positive, GetPaths()->paths_,
+                C2::PathsD(), tree, precision_);
+
+  C2::PathsD polys;
+  flatten(&tree, polys, 0);
+
+  // Filter out contours less than epsilon wide.
+  C2::PathsD filtered;
+  for (C2::PathD poly : polys) {
+    auto area = C2::Area(poly);
+    Rect box;
+    for (auto vert : poly) {
+      box.Union(glm::vec2(vert.x, vert.y));
+    }
+    glm::vec2 size = box.Size();
+    if (glm::abs(area) > glm::max(size.x, size.y) * epsilon) {
+      filtered.push_back(poly);
+    }
+  }
+
+  auto ps = SimplifyPaths(filtered, epsilon, true);
+  return CrossSection(shared_paths(ps));
+}
+
+/**
+ * Inflate the contours in CrossSection by the specified delta, handling
+ * corners according to the given JoinType.
+ *
+ * @param delta Positive deltas will cause outlining contours to expand and
+ * inner (hole) contours to retract. Negative deltas will have the opposite
+ * effect.
+ * @param jt The join type specifying the treatment of contour joins
+ * (corners).
+ * @param miter_limit The maximum distance in multiples of delta that vertices + * can be offset from their original positions with before squaring is + * applied, when the join type is Miter (default is 2, which is the + * minimum allowed). See the [Clipper2 + * MiterLimit](http://www.angusj.com/clipper2/Docs/Units/Clipper.Offset/Classes/ClipperOffset/Properties/MiterLimit.htm) + * page for a visual example. + * @param circularSegments Number of segments per 360 degrees of + * JoinType::Round corners (roughly, the number of vertices that + * will be added to each contour). Default is calculated by the static Quality + * defaults according to the radius. + */ +CrossSection CrossSection::Offset(double delta, JoinType jointype, + double miter_limit, + int circularSegments) const { + double arc_tol = 0.; + if (jointype == JoinType::Round) { + int n = circularSegments > 2 ? circularSegments + : Quality::GetCircularSegments(delta); + // This calculates tolerance as a function of circular segments and delta + // (radius) in order to get back the same number of segments in Clipper2: + // steps_per_360 = PI / acos(1 - arc_tol / abs_delta) + const double abs_delta = std::fabs(delta); + const double scaled_delta = abs_delta * std::pow(10, precision_); + arc_tol = (std::cos(Clipper2Lib::PI / n) - 1) * -scaled_delta; + } + auto ps = + C2::InflatePaths(GetPaths()->paths_, delta, jt(jointype), + C2::EndType::Polygon, miter_limit, precision_, arc_tol); + return CrossSection(shared_paths(ps)); +} + +/** + * Compute the convex hull enveloping a set of cross-sections. + * + * @param crossSections A vector of cross-sections over which to compute a + * convex hull. + */ +CrossSection CrossSection::Hull( + const std::vector& crossSections) { + int n = 0; + for (auto cs : crossSections) n += cs.NumVert(); + SimplePolygon pts; + pts.reserve(n); + for (auto cs : crossSections) { + auto paths = cs.GetPaths()->paths_; + for (auto path : paths) { + for (auto p : path) { + pts.push_back(v2_of_pd(p)); + } + } + } + return CrossSection(shared_paths(C2::PathsD{HullImpl(pts)})); +} + +/** + * Compute the convex hull of this cross-section. + */ +CrossSection CrossSection::Hull() const { + return Hull(std::vector{*this}); +} + +/** + * Compute the convex hull of a set of points. If the given points are fewer + * than 3, an empty CrossSection will be returned. + * + * @param pts A vector of 2-dimensional points over which to compute a convex + * hull. + */ +CrossSection CrossSection::Hull(SimplePolygon pts) { + return CrossSection(shared_paths(C2::PathsD{HullImpl(pts)})); +} + +/** + * Compute the convex hull of a set of points/polygons. If the given points are + * fewer than 3, an empty CrossSection will be returned. + * + * @param pts A vector of vectors of 2-dimensional points over which to compute + * a convex hull. + */ +CrossSection CrossSection::Hull(const Polygons polys) { + SimplePolygon pts; + for (auto poly : polys) { + for (auto p : poly) { + pts.push_back(p); + } + } + return Hull(pts); +} + +/** + * Return the total area covered by complex polygons making up the + * CrossSection. + */ +double CrossSection::Area() const { return C2::Area(GetPaths()->paths_); } + +/** + * Return the number of vertices in the CrossSection. + */ +int CrossSection::NumVert() const { + int n = 0; + auto paths = GetPaths()->paths_; + for (auto p : paths) { + n += p.size(); + } + return n; +} + +/** + * Return the number of contours (both outer and inner paths) in the + * CrossSection. 
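+ *
+ * Illustrative sketch (editor's addition, based on the definition below): a
+ * 4 x 4 square with a 1 x 1 square hole subtracted from it reports two
+ * contours, the outline and the hole.
+ *
+ *   using namespace manifold;
+ *   CrossSection ring = CrossSection::Square(glm::vec2(4.0f), true) -
+ *                       CrossSection::Square(glm::vec2(1.0f), true);
+ *   int n = ring.NumContour();  // expected: 2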
+ */
+int CrossSection::NumContour() const { return GetPaths()->paths_.size(); }
+
+/**
+ * Does the CrossSection contain any contours?
+ */
+bool CrossSection::IsEmpty() const { return GetPaths()->paths_.empty(); }
+
+/**
+ * Returns the axis-aligned bounding rectangle of all the CrossSections'
+ * vertices.
+ */
+Rect CrossSection::Bounds() const {
+  auto r = C2::GetBounds(GetPaths()->paths_);
+  return Rect({r.left, r.bottom}, {r.right, r.top});
+}
+
+/**
+ * Return the contours of this CrossSection as a Polygons.
+ */
+Polygons CrossSection::ToPolygons() const {
+  auto polys = Polygons();
+  auto paths = GetPaths()->paths_;
+  polys.reserve(paths.size());
+  for (auto p : paths) {
+    auto sp = SimplePolygon();
+    sp.reserve(p.size());
+    for (auto v : p) {
+      sp.push_back({v.x, v.y});
+    }
+    polys.push_back(sp);
+  }
+  return polys;
+}
+}  // namespace manifold
diff --git a/thirdparty/manifold/src/manifold/include/manifold.h b/thirdparty/manifold/src/manifold/include/manifold.h
new file mode 100644
index 000000000000..6d8772bc9fd9
--- /dev/null
+++ b/thirdparty/manifold/src/manifold/include/manifold.h
@@ -0,0 +1,311 @@
+// Copyright 2021 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include <functional>
+#include <limits>
+#include <memory>
+#include <stdexcept>
+
+#include "cross_section.h"
+#include "public.h"
+#include "vec_view.h"
+
+namespace manifold {
+
+/**
+ * @ingroup Debug
+ *
+ * Allows modification of the assertions checked in MANIFOLD_DEBUG mode.
+ *
+ * @return ExecutionParams&
+ */
+ExecutionParams& ManifoldParams();
+
+class CsgNode;
+class CsgLeafNode;
+
+/** @ingroup Connections
+ * @{
+ */
+
+/**
+ * An alternative to Mesh for output suitable for pushing into graphics
+ * libraries directly. This may not be manifold since the verts are duplicated
+ * along property boundaries that do not match. The additional merge vectors
+ * store this missing information, allowing the manifold to be reconstructed.
+ */
+struct MeshGL {
+  /// Number of property vertices
+  uint32_t NumVert() const {
+    if (vertProperties.size() / numProp >=
+        static_cast<std::vector<float>::size_type>(
+            std::numeric_limits<uint32_t>::max()))
+      throw std::out_of_range("mesh too large");
+    return vertProperties.size() / numProp;
+  };
+  /// Number of triangles
+  uint32_t NumTri() const {
+    if (triVerts.size() / 3 >=
+        static_cast<std::vector<uint32_t>::size_type>(
+            std::numeric_limits<uint32_t>::max()))
+      throw std::out_of_range("mesh too large");
+    return triVerts.size() / 3;
+  };
+
+  /// Number of properties per vertex, always >= 3.
+  uint32_t numProp = 3;
+  /// Flat, GL-style interleaved list of all vertex properties: propVal =
+  /// vertProperties[vert * numProp + propIdx]. The first three properties are
+  /// always the position x, y, z.
+  std::vector<float> vertProperties;
+  /// The vertex indices of the three triangle corners in CCW (from the
+  /// outside) order, for each triangle.
+  std::vector<uint32_t> triVerts;
+  /// Optional: A list of only the vertex indices that need to be merged to
+  /// reconstruct the manifold.
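+  /// (Editor's illustration with hypothetical values: if property verts 9
+  /// and 5 share a position but carry different UVs, mergeFromVert could
+  /// contain 9 with the corresponding entry of mergeToVert containing 5,
+  /// telling Merge() to weld them back together.)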
+ std::vector mergeFromVert; + /// Optional: The same length as mergeFromVert, and the corresponding value + /// contains the vertex to merge with. It will have an identical position, but + /// the other properties may differ. + std::vector mergeToVert; + /// Optional: Indicates runs of triangles that correspond to a particular + /// input mesh instance. The runs encompass all of triVerts and are sorted + /// by runOriginalID. Run i begins at triVerts[runIndex[i]] and ends at + /// triVerts[runIndex[i+1]]. All runIndex values are divisible by 3. + std::vector runIndex; + /// Optional: The OriginalID of the mesh this triangle run came from. This ID + /// is ideal for reapplying materials to the output mesh. Multiple runs may + /// have the same ID, e.g. representing different copies of the same input + /// mesh. If you create an input MeshGL that you want to be able to reference + /// as one or more originals, be sure to set unique values from ReserveIDs(). + std::vector runOriginalID; + /// Optional: For each run, a 3x4 transform is stored representing how the + /// corresponding original mesh was transformed to create this triangle run. + /// This matrix is stored in column-major order and the length of the overall + /// vector is 12 * runOriginalID.size(). + std::vector runTransform; + /// Optional: Length NumTri, contains an ID of the source face this triangle + /// comes from. When auto-generated, this ID will be a triangle index into the + /// original mesh. All neighboring coplanar triangles from that input mesh + /// will refer to a single triangle of that group as the faceID. When + /// supplying faceIDs, ensure that triangles with the same ID are in fact + /// coplanar and have consistent properties (within some tolerance) or the + /// output will be surprising. + std::vector faceID; + /// Optional: The X-Y-Z-W weighted tangent vectors for smooth Refine(). If + /// non-empty, must be exactly four times as long as Mesh.triVerts. Indexed + /// as 4 * (3 * tri + i) + j, i < 3, j < 4, representing the tangent value + /// Mesh.triVerts[tri][i] along the CCW edge. If empty, mesh is faceted. + std::vector halfedgeTangent; + /// The absolute precision of the vertex positions, based on accrued rounding + /// errors. When creating a Manifold, the precision used will be the maximum + /// of this and a baseline precision from the size of the bounding box. Any + /// edge shorter than precision may be collapsed. + float precision = 0; + + MeshGL() = default; + MeshGL(const Mesh& mesh); + + bool Merge(); +}; +/** @} */ + +/** @defgroup Core + * @brief The central classes of the library + * @{ + */ + +/** + * This library's internal representation of an oriented, 2-manifold, triangle + * mesh - a simple boundary-representation of a solid object. Use this class to + * store and operate on solids, and use MeshGL for input and output, or + * potentially Mesh if only basic geometry is required. + * + * In addition to storing geometric data, a Manifold can also store an arbitrary + * number of vertex properties. These could be anything, e.g. normals, UV + * coordinates, colors, etc, but this library is completely agnostic. All + * properties are merely float values indexed by channel number. It is up to the + * user to associate channel numbers with meaning. + * + * Manifold allows vertex properties to be shared for efficient storage, or to + * have multiple property verts associated with a single geometric vertex, + * allowing sudden property changes, e.g. 
at Boolean intersections, without + * sacrificing manifoldness. + * + * Manifolds also keep track of their relationships to their inputs, via + * OriginalIDs and the faceIDs and transforms accessible through MeshGL. This + * allows object-level properties to be re-associated with the output after many + * operations, particularly useful for materials. Since separate object's + * properties are not mixed, there is no requirement that channels have + * consistent meaning between different inputs. + */ +class Manifold { + public: + /** @name Creation + * Constructors + */ + ///@{ + Manifold(); + ~Manifold(); + Manifold(const Manifold& other); + Manifold& operator=(const Manifold& other); + Manifold(Manifold&&) noexcept; + Manifold& operator=(Manifold&&) noexcept; + + Manifold(const MeshGL&, const std::vector& propertyTolerance = {}); + Manifold(const Mesh&); + + static Manifold Smooth(const MeshGL&, + const std::vector& sharpenedEdges = {}); + static Manifold Smooth(const Mesh&, + const std::vector& sharpenedEdges = {}); + static Manifold Tetrahedron(); + static Manifold Cube(glm::vec3 size = glm::vec3(1.0f), bool center = false); + static Manifold Cylinder(float height, float radiusLow, + float radiusHigh = -1.0f, int circularSegments = 0, + bool center = false); + static Manifold Sphere(float radius, int circularSegments = 0); + static Manifold Extrude(const CrossSection& crossSection, float height, + int nDivisions = 0, float twistDegrees = 0.0f, + glm::vec2 scaleTop = glm::vec2(1.0f)); + static Manifold Revolve(const CrossSection& crossSection, + int circularSegments = 0, + float revolveDegrees = 360.0f); + ///@} + + /** @name Topological + * No geometric calculations. + */ + ///@{ + static Manifold Compose(const std::vector&); + std::vector Decompose() const; + ///@} + + /** @name Information + * Details of the manifold + */ + ///@{ + Mesh GetMesh() const; + MeshGL GetMeshGL(glm::ivec3 normalIdx = glm::ivec3(0)) const; + bool IsEmpty() const; + enum class Error { + NoError, + NonFiniteVertex, + NotManifold, + VertexOutOfBounds, + PropertiesWrongLength, + MissingPositionProperties, + MergeVectorsDifferentLengths, + MergeIndexOutOfBounds, + TransformWrongLength, + RunIndexWrongLength, + FaceIDWrongLength, + InvalidConstruction, + }; + Error Status() const; + int NumVert() const; + int NumEdge() const; + int NumTri() const; + int NumProp() const; + int NumPropVert() const; + Box BoundingBox() const; + float Precision() const; + int Genus() const; + Properties GetProperties() const; + float MinGap(const Manifold& other, float searchLength) const; + ///@} + + /** @name Mesh ID + * Details of the manifold's relation to its input meshes, for the purposes + * of reapplying mesh properties. 
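+ *
+ * Illustrative round trip (editor's addition, not upstream documentation;
+ * meshGL is an assumed input):
+ *
+ *   uint32_t id = Manifold::ReserveIDs(1);
+ *   meshGL.runOriginalID = {id};  // tag the whole input mesh
+ *   Manifold part(meshGL);
+ *   // After boolean operations, triangle runs in part.GetMeshGL() whose
+ *   // runOriginalID equals id came from this input, so its material can
+ *   // be reapplied.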
+ */ + ///@{ + int OriginalID() const; + Manifold AsOriginal() const; + static uint32_t ReserveIDs(uint32_t); + ///@} + + /** @name Modification + */ + ///@{ + Manifold Translate(glm::vec3) const; + Manifold Scale(glm::vec3) const; + Manifold Rotate(float xDegrees, float yDegrees = 0.0f, + float zDegrees = 0.0f) const; + Manifold Transform(const glm::mat4x3&) const; + Manifold Mirror(glm::vec3) const; + Manifold Warp(std::function) const; + Manifold WarpBatch(std::function)>) const; + Manifold SetProperties( + int, std::function) const; + Manifold CalculateCurvature(int gaussianIdx, int meanIdx) const; + Manifold CalculateNormals(int normalIdx, float minSharpAngle = 60) const; + Manifold SmoothByNormals(int normalIdx) const; + Manifold SmoothOut(float minSharpAngle = 60, float minSmoothness = 0) const; + Manifold Refine(int) const; + Manifold RefineToLength(float) const; + // Manifold RefineToPrecision(float); + ///@} + + /** @name Boolean + * Combine two manifolds + */ + ///@{ + Manifold Boolean(const Manifold& second, OpType op) const; + static Manifold BatchBoolean(const std::vector& manifolds, + OpType op); + // Boolean operation shorthand + Manifold operator+(const Manifold&) const; // Add (Union) + Manifold& operator+=(const Manifold&); + Manifold operator-(const Manifold&) const; // Subtract (Difference) + Manifold& operator-=(const Manifold&); + Manifold operator^(const Manifold&) const; // Intersect + Manifold& operator^=(const Manifold&); + std::pair Split(const Manifold&) const; + std::pair SplitByPlane(glm::vec3 normal, + float originOffset) const; + Manifold TrimByPlane(glm::vec3 normal, float originOffset) const; + ///@} + + /** @name 2D from 3D + */ + ///@{ + CrossSection Slice(float height = 0) const; + CrossSection Project() const; + ///@} + + /** @name Convex hull + */ + ///@{ + Manifold Hull() const; + static Manifold Hull(const std::vector& manifolds); + static Manifold Hull(const std::vector& pts); + ///@} + + /** @name Testing hooks + * These are just for internal testing. + */ + ///@{ + bool MatchesTriNormals() const; + int NumDegenerateTris() const; + int NumOverlaps(const Manifold& second) const; + ///@} + + struct Impl; + + private: + Manifold(std::shared_ptr pNode_); + Manifold(std::shared_ptr pImpl_); + static Manifold Invalid(); + mutable std::shared_ptr pNode_; + + CsgLeafNode& GetCsgLeafNode() const; +}; +/** @} */ +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/boolean3.cpp b/thirdparty/manifold/src/manifold/src/boolean3.cpp new file mode 100644 index 000000000000..5ed8660d30f0 --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/boolean3.cpp @@ -0,0 +1,602 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "boolean3.h" + +#include + +#include "par.h" + +using namespace manifold; + +namespace { + +// These two functions (Interpolate and Intersect) are the only places where +// floating-point operations take place in the whole Boolean function. 
These are +// carefully designed to minimize rounding error and to eliminate it at edge +// cases to ensure consistency. + +glm::vec2 Interpolate(glm::vec3 pL, glm::vec3 pR, float x) { + const float dxL = x - pL.x; + const float dxR = x - pR.x; + ASSERT(dxL * dxR <= 0, logicErr, "Boolean manifold error: not in domain"); + const bool useL = fabs(dxL) < fabs(dxR); + const glm::vec3 dLR = pR - pL; + const float lambda = (useL ? dxL : dxR) / dLR.x; + if (!isfinite(lambda) || !isfinite(dLR.y) || !isfinite(dLR.z)) + return glm::vec2(pL.y, pL.z); + glm::vec2 yz; + yz[0] = (useL ? pL.y : pR.y) + lambda * dLR.y; + yz[1] = (useL ? pL.z : pR.z) + lambda * dLR.z; + return yz; +} + +glm::vec4 Intersect(const glm::vec3 &pL, const glm::vec3 &pR, + const glm::vec3 &qL, const glm::vec3 &qR) { + const float dyL = qL.y - pL.y; + const float dyR = qR.y - pR.y; + ASSERT(dyL * dyR <= 0, logicErr, "Boolean manifold error: no intersection"); + const bool useL = fabs(dyL) < fabs(dyR); + const float dx = pR.x - pL.x; + float lambda = (useL ? dyL : dyR) / (dyL - dyR); + if (!isfinite(lambda)) lambda = 0.0f; + glm::vec4 xyzz; + xyzz.x = (useL ? pL.x : pR.x) + lambda * dx; + const float pDy = pR.y - pL.y; + const float qDy = qR.y - qL.y; + const bool useP = fabs(pDy) < fabs(qDy); + xyzz.y = (useL ? (useP ? pL.y : qL.y) : (useP ? pR.y : qR.y)) + + lambda * (useP ? pDy : qDy); + xyzz.z = (useL ? pL.z : pR.z) + lambda * (pR.z - pL.z); + xyzz.w = (useL ? qL.z : qR.z) + lambda * (qR.z - qL.z); + return xyzz; +} + +template +struct CopyFaceEdges { + const SparseIndices &p1q1; + // const int *p1q1; + // x can be either vert or edge (0 or 1). + SparseIndices &pXq1; + VecView halfedgesQ; + + void operator()(thrust::tuple in) { + int idx = 3 * thrust::get<0>(in); + size_t i = thrust::get<1>(in); + int pX = p1q1.Get(i, inverted); + int q2 = p1q1.Get(i, !inverted); + + for (const int j : {0, 1, 2}) { + const int q1 = 3 * q2 + j; + const Halfedge edge = halfedgesQ[q1]; + int a = pX; + int b = edge.IsForward() ? q1 : edge.pairedHalfedge; + if (inverted) std::swap(a, b); + pXq1.Set(idx + static_cast(j), a, b); + } + } +}; + +SparseIndices Filter11(const Manifold::Impl &inP, const Manifold::Impl &inQ, + const SparseIndices &p1q2, const SparseIndices &p2q1) { + ZoneScoped; + SparseIndices p1q1(3 * p1q2.size() + 3 * p2q1.size()); + for_each_n(autoPolicy(p1q2.size()), zip(countAt(0_z), countAt(0_z)), + p1q2.size(), CopyFaceEdges({p1q2, p1q1, inQ.halfedge_})); + for_each_n(autoPolicy(p2q1.size()), zip(countAt(p1q2.size()), countAt(0_z)), + p2q1.size(), CopyFaceEdges({p2q1, p1q1, inP.halfedge_})); + p1q1.Unique(); + return p1q1; +} + +inline bool Shadows(float p, float q, float dir) { + return p == q ? dir < 0 : p < q; +} + +inline thrust::pair Shadow01( + const int p0, const int q1, VecView vertPosP, + VecView vertPosQ, VecView halfedgeQ, + const float expandP, VecView normalP, const bool reverse) { + const int q1s = halfedgeQ[q1].startVert; + const int q1e = halfedgeQ[q1].endVert; + const float p0x = vertPosP[p0].x; + const float q1sx = vertPosQ[q1s].x; + const float q1ex = vertPosQ[q1e].x; + int s01 = reverse ? 
Shadows(q1sx, p0x, expandP * normalP[q1s].x) - + Shadows(q1ex, p0x, expandP * normalP[q1e].x) + : Shadows(p0x, q1ex, expandP * normalP[p0].x) - + Shadows(p0x, q1sx, expandP * normalP[p0].x); + glm::vec2 yz01(NAN); + + if (s01 != 0) { + yz01 = Interpolate(vertPosQ[q1s], vertPosQ[q1e], vertPosP[p0].x); + if (reverse) { + glm::vec3 diff = vertPosQ[q1s] - vertPosP[p0]; + const float start2 = glm::dot(diff, diff); + diff = vertPosQ[q1e] - vertPosP[p0]; + const float end2 = glm::dot(diff, diff); + const float dir = start2 < end2 ? normalP[q1s].y : normalP[q1e].y; + if (!Shadows(yz01[0], vertPosP[p0].y, expandP * dir)) s01 = 0; + } else { + if (!Shadows(vertPosP[p0].y, yz01[0], expandP * normalP[p0].y)) s01 = 0; + } + } + return thrust::make_pair(s01, yz01); +} + +// https://github.com/scandum/binary_search/blob/master/README.md +// much faster than standard binary search on large arrays +size_t monobound_quaternary_search(VecView array, int64_t key) { + if (array.size() == 0) { + return -1; + } + size_t bot = 0; + size_t top = array.size(); + while (top >= 65536) { + size_t mid = top / 4; + top -= mid * 3; + if (key < array[bot + mid * 2]) { + if (key >= array[bot + mid]) { + bot += mid; + } + } else { + bot += mid * 2; + if (key >= array[bot + mid]) { + bot += mid; + } + } + } + + while (top > 3) { + size_t mid = top / 2; + if (key >= array[bot + mid]) { + bot += mid; + } + top -= mid; + } + + while (top--) { + if (key == array[bot + top]) { + return bot + top; + } + } + return -1; +} + +struct Kernel11 { + VecView vertPosP; + VecView vertPosQ; + VecView halfedgeP; + VecView halfedgeQ; + float expandP; + VecView normalP; + const SparseIndices &p1q1; + + void operator()(thrust::tuple inout) { + const int p1 = p1q1.Get(thrust::get<0>(inout), false); + const int q1 = p1q1.Get(thrust::get<0>(inout), true); + glm::vec4 &xyzz11 = thrust::get<1>(inout); + int &s11 = thrust::get<2>(inout); + + // For pRL[k], qRL[k], k==0 is the left and k==1 is the right. + int k = 0; + glm::vec3 pRL[2], qRL[2]; + // Either the left or right must shadow, but not both. This ensures the + // intersection is between the left and right. + bool shadows = false; + s11 = 0; + + const int p0[2] = {halfedgeP[p1].startVert, halfedgeP[p1].endVert}; + for (int i : {0, 1}) { + const auto syz01 = Shadow01(p0[i], q1, vertPosP, vertPosQ, halfedgeQ, + expandP, normalP, false); + const int s01 = syz01.first; + const glm::vec2 yz01 = syz01.second; + // If the value is NaN, then these do not overlap. + if (isfinite(yz01[0])) { + s11 += s01 * (i == 0 ? -1 : 1); + if (k < 2 && (k == 0 || (s01 != 0) != shadows)) { + shadows = s01 != 0; + pRL[k] = vertPosP[p0[i]]; + qRL[k] = glm::vec3(pRL[k].x, yz01); + ++k; + } + } + } + + const int q0[2] = {halfedgeQ[q1].startVert, halfedgeQ[q1].endVert}; + for (int i : {0, 1}) { + const auto syz10 = Shadow01(q0[i], p1, vertPosQ, vertPosP, halfedgeP, + expandP, normalP, true); + const int s10 = syz10.first; + const glm::vec2 yz10 = syz10.second; + // If the value is NaN, then these do not overlap. + if (isfinite(yz10[0])) { + s11 += s10 * (i == 0 ? 
-1 : 1); + if (k < 2 && (k == 0 || (s10 != 0) != shadows)) { + shadows = s10 != 0; + qRL[k] = vertPosQ[q0[i]]; + pRL[k] = glm::vec3(qRL[k].x, yz10); + ++k; + } + } + } + + if (s11 == 0) { // No intersection + xyzz11 = glm::vec4(NAN); + } else { + ASSERT(k == 2, logicErr, "Boolean manifold error: s11"); + xyzz11 = Intersect(pRL[0], pRL[1], qRL[0], qRL[1]); + + const int p1s = halfedgeP[p1].startVert; + const int p1e = halfedgeP[p1].endVert; + glm::vec3 diff = vertPosP[p1s] - glm::vec3(xyzz11); + const float start2 = glm::dot(diff, diff); + diff = vertPosP[p1e] - glm::vec3(xyzz11); + const float end2 = glm::dot(diff, diff); + const float dir = start2 < end2 ? normalP[p1s].z : normalP[p1e].z; + + if (!Shadows(xyzz11.z, xyzz11.w, expandP * dir)) s11 = 0; + } + } +}; + +std::tuple, Vec> Shadow11(SparseIndices &p1q1, + const Manifold::Impl &inP, + const Manifold::Impl &inQ, + float expandP) { + ZoneScoped; + Vec s11(p1q1.size()); + Vec xyzz11(p1q1.size()); + + for_each_n(autoPolicy(p1q1.size()), + zip(countAt(0_z), xyzz11.begin(), s11.begin()), p1q1.size(), + Kernel11({inP.vertPos_, inQ.vertPos_, inP.halfedge_, inQ.halfedge_, + expandP, inP.vertNormal_, p1q1})); + + p1q1.KeepFinite(xyzz11, s11); + + return std::make_tuple(s11, xyzz11); +}; + +struct Kernel02 { + VecView vertPosP; + VecView halfedgeQ; + VecView vertPosQ; + const float expandP; + VecView vertNormalP; + const SparseIndices &p0q2; + const bool forward; + + void operator()(thrust::tuple inout) { + const int p0 = p0q2.Get(thrust::get<0>(inout), !forward); + const int q2 = p0q2.Get(thrust::get<0>(inout), forward); + int &s02 = thrust::get<1>(inout); + float &z02 = thrust::get<2>(inout); + + // For yzzLR[k], k==0 is the left and k==1 is the right. + int k = 0; + glm::vec3 yzzRL[2]; + // Either the left or right must shadow, but not both. This ensures the + // intersection is between the left and right. + bool shadows = false; + int closestVert = -1; + float minMetric = std::numeric_limits::infinity(); + s02 = 0; + + const glm::vec3 posP = vertPosP[p0]; + for (const int i : {0, 1, 2}) { + const int q1 = 3 * q2 + i; + const Halfedge edge = halfedgeQ[q1]; + const int q1F = edge.IsForward() ? q1 : edge.pairedHalfedge; + + if (!forward) { + const int qVert = halfedgeQ[q1F].startVert; + const glm::vec3 diff = posP - vertPosQ[qVert]; + const float metric = glm::dot(diff, diff); + if (metric < minMetric) { + minMetric = metric; + closestVert = qVert; + } + } + + const auto syz01 = Shadow01(p0, q1F, vertPosP, vertPosQ, halfedgeQ, + expandP, vertNormalP, !forward); + const int s01 = syz01.first; + const glm::vec2 yz01 = syz01.second; + // If the value is NaN, then these do not overlap. + if (isfinite(yz01[0])) { + s02 += s01 * (forward == edge.IsForward() ? 
-1 : 1); + if (k < 2 && (k == 0 || (s01 != 0) != shadows)) { + shadows = s01 != 0; + yzzRL[k++] = glm::vec3(yz01[0], yz01[1], yz01[1]); + } + } + } + + if (s02 == 0) { // No intersection + z02 = NAN; + } else { + ASSERT(k == 2, logicErr, "Boolean manifold error: s02"); + glm::vec3 vertPos = vertPosP[p0]; + z02 = Interpolate(yzzRL[0], yzzRL[1], vertPos.y)[1]; + if (forward) { + if (!Shadows(vertPos.z, z02, expandP * vertNormalP[p0].z)) s02 = 0; + } else { + // ASSERT(closestVert != -1, topologyErr, "No closest vert"); + if (!Shadows(z02, vertPos.z, expandP * vertNormalP[closestVert].z)) + s02 = 0; + } + } + } +}; + +std::tuple, Vec> Shadow02(const Manifold::Impl &inP, + const Manifold::Impl &inQ, + SparseIndices &p0q2, bool forward, + float expandP) { + ZoneScoped; + Vec s02(p0q2.size()); + Vec z02(p0q2.size()); + + auto vertNormalP = forward ? inP.vertNormal_ : inQ.vertNormal_; + for_each_n(autoPolicy(p0q2.size()), + zip(countAt(0_z), s02.begin(), z02.begin()), p0q2.size(), + Kernel02({inP.vertPos_, inQ.halfedge_, inQ.vertPos_, expandP, + vertNormalP, p0q2, forward})); + + p0q2.KeepFinite(z02, s02); + + return std::make_tuple(s02, z02); +}; + +struct Kernel12 { + VecView p0q2; + VecView s02; + VecView z02; + VecView p1q1; + VecView s11; + VecView xyzz11; + VecView halfedgesP; + VecView halfedgesQ; + VecView vertPosP; + const bool forward; + const SparseIndices &p1q2; + + void operator()(thrust::tuple inout) { + int p1 = p1q2.Get(thrust::get<0>(inout), !forward); + int q2 = p1q2.Get(thrust::get<0>(inout), forward); + int &x12 = thrust::get<1>(inout); + glm::vec3 &v12 = thrust::get<2>(inout); + + // For xzyLR-[k], k==0 is the left and k==1 is the right. + int k = 0; + glm::vec3 xzyLR0[2]; + glm::vec3 xzyLR1[2]; + // Either the left or right must shadow, but not both. This ensures the + // intersection is between the left and right. + bool shadows = false; + x12 = 0; + + const Halfedge edge = halfedgesP[p1]; + + for (int vert : {edge.startVert, edge.endVert}) { + const int64_t key = forward ? SparseIndices::EncodePQ(vert, q2) + : SparseIndices::EncodePQ(q2, vert); + const size_t idx = monobound_quaternary_search(p0q2, key); + if (idx != -1) { + const int s = s02[idx]; + x12 += s * ((vert == edge.startVert) == forward ? 1 : -1); + if (k < 2 && (k == 0 || (s != 0) != shadows)) { + shadows = s != 0; + xzyLR0[k] = vertPosP[vert]; + thrust::swap(xzyLR0[k].y, xzyLR0[k].z); + xzyLR1[k] = xzyLR0[k]; + xzyLR1[k][1] = z02[idx]; + k++; + } + } + } + + for (const int i : {0, 1, 2}) { + const int q1 = 3 * q2 + i; + const Halfedge edge = halfedgesQ[q1]; + const int q1F = edge.IsForward() ? q1 : edge.pairedHalfedge; + const int64_t key = forward ? SparseIndices::EncodePQ(p1, q1F) + : SparseIndices::EncodePQ(q1F, p1); + const size_t idx = monobound_quaternary_search(p1q1, key); + if (idx != -1) { // s is implicitly zero for anything not found + const int s = s11[idx]; + x12 -= s * (edge.IsForward() ? 
1 : -1); + if (k < 2 && (k == 0 || (s != 0) != shadows)) { + shadows = s != 0; + const glm::vec4 xyzz = xyzz11[idx]; + xzyLR0[k][0] = xyzz.x; + xzyLR0[k][1] = xyzz.z; + xzyLR0[k][2] = xyzz.y; + xzyLR1[k] = xzyLR0[k]; + xzyLR1[k][1] = xyzz.w; + if (!forward) thrust::swap(xzyLR0[k][1], xzyLR1[k][1]); + k++; + } + } + } + + if (x12 == 0) { // No intersection + v12 = glm::vec3(NAN); + } else { + ASSERT(k == 2, logicErr, "Boolean manifold error: v12"); + const glm::vec4 xzyy = + Intersect(xzyLR0[0], xzyLR0[1], xzyLR1[0], xzyLR1[1]); + v12.x = xzyy[0]; + v12.y = xzyy[2]; + v12.z = xzyy[1]; + } + } +}; + +std::tuple, Vec> Intersect12( + const Manifold::Impl &inP, const Manifold::Impl &inQ, const Vec &s02, + const SparseIndices &p0q2, const Vec &s11, const SparseIndices &p1q1, + const Vec &z02, const Vec &xyzz11, SparseIndices &p1q2, + bool forward) { + ZoneScoped; + Vec x12(p1q2.size()); + Vec v12(p1q2.size()); + + for_each_n( + autoPolicy(p1q2.size()), zip(countAt(0_z), x12.begin(), v12.begin()), + p1q2.size(), + Kernel12({p0q2.AsVec64(), s02, z02, p1q1.AsVec64(), s11, xyzz11, + inP.halfedge_, inQ.halfedge_, inP.vertPos_, forward, p1q2})); + + p1q2.KeepFinite(v12, x12); + + return std::make_tuple(x12, v12); +}; + +Vec Winding03(const Manifold::Impl &inP, Vec &vertices, Vec &s02, + bool reverse) { + ZoneScoped; + // verts that are not shadowed (not in p0q2) have winding number zero. + Vec w03(inP.NumVert(), 0); + // checking is slow, so just sort and reduce + auto policy = autoPolicy(vertices.size()); + stable_sort( + policy, zip(vertices.begin(), s02.begin()), + zip(vertices.end(), s02.end()), + [](const thrust::tuple &a, const thrust::tuple &b) { + return thrust::get<0>(a) < thrust::get<0>(b); + }); + Vec w03val(w03.size()); + Vec w03vert(w03.size()); + // sum known s02 values into w03 (winding number) + auto endPair = reduce_by_key< + thrust::pair>( + policy, vertices.begin(), vertices.end(), s02.begin(), w03vert.begin(), + w03val.begin()); + scatter(policy, w03val.begin(), endPair.second, w03vert.begin(), w03.begin()); + + if (reverse) + transform(policy, w03.begin(), w03.end(), w03.begin(), + thrust::negate()); + return w03; +}; +} // namespace + +namespace manifold { +Boolean3::Boolean3(const Manifold::Impl &inP, const Manifold::Impl &inQ, + OpType op) + : inP_(inP), inQ_(inQ), expandP_(op == OpType::Add ? 
1.0 : -1.0) { + // Symbolic perturbation: + // Union -> expand inP + // Difference, Intersection -> contract inP + +#ifdef MANIFOLD_DEBUG + Timer broad; + broad.Start(); +#endif + + if (inP.IsEmpty() || inQ.IsEmpty() || !inP.bBox_.DoesOverlap(inQ.bBox_)) { + PRINT("No overlap, early out"); + w03_.resize(inP.NumVert(), 0); + w30_.resize(inQ.NumVert(), 0); + return; + } + + // Level 3 + // Find edge-triangle overlaps (broad phase) + p1q2_ = inQ_.EdgeCollisions(inP_); + p2q1_ = inP_.EdgeCollisions(inQ_, true); // inverted + + p1q2_.Sort(); + PRINT("p1q2 size = " << p1q2_.size()); + + p2q1_.Sort(); + PRINT("p2q1 size = " << p2q1_.size()); + + // Level 2 + // Find vertices that overlap faces in XY-projection + SparseIndices p0q2 = inQ.VertexCollisionsZ(inP.vertPos_); + p0q2.Sort(); + PRINT("p0q2 size = " << p0q2.size()); + + SparseIndices p2q0 = inP.VertexCollisionsZ(inQ.vertPos_, true); // inverted + p2q0.Sort(); + PRINT("p2q0 size = " << p2q0.size()); + + // Find involved edge pairs from Level 3 + SparseIndices p1q1 = Filter11(inP_, inQ_, p1q2_, p2q1_); + PRINT("p1q1 size = " << p1q1.size()); + +#ifdef MANIFOLD_DEBUG + broad.Stop(); + Timer intersections; + intersections.Start(); +#endif + + // Level 2 + // Build up XY-projection intersection of two edges, including the z-value for + // each edge, keeping only those whose intersection exists. + Vec s11; + Vec xyzz11; + std::tie(s11, xyzz11) = Shadow11(p1q1, inP, inQ, expandP_); + PRINT("s11 size = " << s11.size()); + + // Build up Z-projection of vertices onto triangles, keeping only those that + // fall inside the triangle. + Vec s02; + Vec z02; + std::tie(s02, z02) = Shadow02(inP, inQ, p0q2, true, expandP_); + PRINT("s02 size = " << s02.size()); + + Vec s20; + Vec z20; + std::tie(s20, z20) = Shadow02(inQ, inP, p2q0, false, expandP_); + PRINT("s20 size = " << s20.size()); + + // Level 3 + // Build up the intersection of the edges and triangles, keeping only those + // that intersect, and record the direction the edge is passing through the + // triangle. + std::tie(x12_, v12_) = + Intersect12(inP, inQ, s02, p0q2, s11, p1q1, z02, xyzz11, p1q2_, true); + PRINT("x12 size = " << x12_.size()); + + std::tie(x21_, v21_) = + Intersect12(inQ, inP, s20, p2q0, s11, p1q1, z20, xyzz11, p2q1_, false); + PRINT("x21 size = " << x21_.size()); + + if (x12_.size() + x21_.size() >= std::numeric_limits::max()) + throw std::out_of_range("mesh too large"); + + Vec p0 = p0q2.Copy(false); + p0q2.Resize(0); + Vec q0 = p2q0.Copy(true); + p2q0.Resize(0); + // Sum up the winding numbers of all vertices. + w03_ = Winding03(inP, p0, s02, false); + + w30_ = Winding03(inQ, q0, s20, true); + +#ifdef MANIFOLD_DEBUG + intersections.Stop(); + + if (ManifoldParams().verbose) { + broad.Print("Broad phase"); + intersections.Print("Intersections"); + } +#endif +} +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/boolean3.h b/thirdparty/manifold/src/manifold/src/boolean3.h new file mode 100644 index 000000000000..9491084c9cfe --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/boolean3.h @@ -0,0 +1,60 @@ +// Copyright 2020 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "impl.h"
+
+#ifdef MANIFOLD_DEBUG
+#define PRINT(msg) \
+  if (ManifoldParams().verbose) std::cout << msg << std::endl;
+#else
+#define PRINT(msg)
+#endif
+
+/**
+ * The notation in these files is abbreviated due to the complexity of the
+ * functions involved. The key is that the input manifolds are P and Q, while
+ * the output is R, and these letters in both upper and lower case refer to
+ * these objects. Operations are based on dimensionality: vert: 0, edge: 1,
+ * face: 2, solid: 3. X denotes a winding-number type quantity from the source
+ * paper of this algorithm, while S is closely related but includes only the
+ * subset of X values which "shadow" (are on the correct side of).
+ *
+ * Nearly everything here is stored in sparse arrays, where for instance each
+ * pair in p2q1 refers to a face index of P interacting with a halfedge index
+ * of Q. Adjacent arrays like x21 refer to the values of X corresponding to
+ * each sparse index pair.
+ *
+ * Note that many functions are designed to work symmetrically, for instance
+ * for both p2q1 and p1q2. Inside these functions P and Q are marked as though
+ * the function is forward, but it may include a Boolean "reverse" that
+ * indicates P and Q have been swapped.
+ */
+
+namespace manifold {
+
+/** @ingroup Private */
+class Boolean3 {
+ public:
+  Boolean3(const Manifold::Impl& inP, const Manifold::Impl& inQ, OpType op);
+  Manifold::Impl Result(OpType op) const;
+
+ private:
+  const Manifold::Impl &inP_, &inQ_;
+  const float expandP_;
+  SparseIndices p1q2_, p2q1_;
+  Vec<int> x12_, x21_, w03_, w30_;
+  Vec<glm::vec3> v12_, v21_;
+};
+}  // namespace manifold
diff --git a/thirdparty/manifold/src/manifold/src/boolean_result.cpp b/thirdparty/manifold/src/manifold/src/boolean_result.cpp
new file mode 100644
index 000000000000..c6750aca201f
--- /dev/null
+++ b/thirdparty/manifold/src/manifold/src/boolean_result.cpp
@@ -0,0 +1,831 @@
+// Copyright 2021 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
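+
+// Illustrative note (editor's addition, not part of the upstream sources):
+// this file assembles the output mesh R from the winding/inclusion numbers
+// computed in boolean3.cpp. For example, a vertex of P with inclusion +2
+// relative to Q is duplicated twice in R, one with inclusion 0 is dropped,
+// and one with inclusion -1 is retained once with its incident halfedges
+// reversed; this is how DuplicateVerts and DuplicateHalfedges below
+// interpret the inclusion values.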
+ +#include +#include +#include + +#if MANIFOLD_PAR == 'T' && __has_include() +#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1 +#include +#include + +template +using concurrent_map = tbb::concurrent_map; +#else +template +// not really concurrent when tbb is disabled +using concurrent_map = std::map; +#endif +#include "boolean3.h" +#include "par.h" +#include "polygon.h" + +using namespace manifold; +using namespace thrust::placeholders; + +template <> +struct std::hash> { + size_t operator()(const std::pair &p) const { + return std::hash()(p.first) ^ std::hash()(p.second); + } +}; + +namespace { + +constexpr int kParallelThreshold = 128; + +struct AbsSum : public thrust::binary_function { + int operator()(int a, int b) { return abs(a) + abs(b); } +}; + +struct DuplicateVerts { + VecView vertPosR; + + void operator()(thrust::tuple in) { + int inclusion = abs(thrust::get<0>(in)); + int vertR = thrust::get<1>(in); + glm::vec3 vertPosP = thrust::get<2>(in); + + for (int i = 0; i < inclusion; ++i) { + vertPosR[vertR + i] = vertPosP; + } + } +}; + +struct CountVerts { + VecView count; + VecView inclusion; + + void operator()(const Halfedge &edge) { + AtomicAdd(count[edge.face], glm::abs(inclusion[edge.startVert])); + } +}; + +template +struct CountNewVerts { + VecView countP; + VecView countQ; + const SparseIndices &pq; + VecView halfedges; + + void operator()(thrust::tuple in) { + int edgeP = pq.Get(thrust::get<0>(in), inverted); + int faceQ = pq.Get(thrust::get<0>(in), !inverted); + int inclusion = glm::abs(thrust::get<1>(in)); + + AtomicAdd(countQ[faceQ], inclusion); + const Halfedge half = halfedges[edgeP]; + AtomicAdd(countP[half.face], inclusion); + AtomicAdd(countP[halfedges[half.pairedHalfedge].face], inclusion); + } +}; + +struct NotZero : public thrust::unary_function { + int operator()(int x) const { return x > 0 ? 
1 : 0; } +}; + +std::tuple, Vec> SizeOutput( + Manifold::Impl &outR, const Manifold::Impl &inP, const Manifold::Impl &inQ, + const Vec &i03, const Vec &i30, const Vec &i12, + const Vec &i21, const SparseIndices &p1q2, const SparseIndices &p2q1, + bool invertQ) { + ZoneScoped; + Vec sidesPerFacePQ(inP.NumTri() + inQ.NumTri(), 0); + // note: numFaceR <= facePQ2R.size() = sidesPerFacePQ.size() + 1 + if (sidesPerFacePQ.size() + 1 >= std::numeric_limits::max()) + throw std::out_of_range("boolean result too large"); + + auto sidesPerFaceP = sidesPerFacePQ.view(0, inP.NumTri()); + auto sidesPerFaceQ = sidesPerFacePQ.view(inP.NumTri(), inQ.NumTri()); + + for_each(autoPolicy(inP.halfedge_.size()), inP.halfedge_.begin(), + inP.halfedge_.end(), CountVerts({sidesPerFaceP, i03})); + for_each(autoPolicy(inP.halfedge_.size()), inQ.halfedge_.begin(), + inQ.halfedge_.end(), CountVerts({sidesPerFaceQ, i30})); + for_each_n(autoPolicy(i12.size()), zip(countAt(0), i12.begin()), i12.size(), + CountNewVerts( + {sidesPerFaceP, sidesPerFaceQ, p1q2, inP.halfedge_})); + for_each_n( + autoPolicy(i21.size()), zip(countAt(0), i21.begin()), i21.size(), + CountNewVerts({sidesPerFaceQ, sidesPerFaceP, p2q1, inQ.halfedge_})); + + Vec facePQ2R(inP.NumTri() + inQ.NumTri() + 1, 0); + auto keepFace = + thrust::make_transform_iterator(sidesPerFacePQ.begin(), NotZero()); + inclusive_scan(autoPolicy(sidesPerFacePQ.size()), keepFace, + keepFace + sidesPerFacePQ.size(), facePQ2R.begin() + 1); + int numFaceR = facePQ2R.back(); + facePQ2R.resize(inP.NumTri() + inQ.NumTri()); + + outR.faceNormal_.resize(numFaceR); + auto next = copy_if( + autoPolicy(inP.faceNormal_.size()), inP.faceNormal_.begin(), + inP.faceNormal_.end(), keepFace, outR.faceNormal_.begin(), + thrust::identity()); + if (invertQ) { + auto start = thrust::make_transform_iterator(inQ.faceNormal_.begin(), + thrust::negate()); + auto end = thrust::make_transform_iterator(inQ.faceNormal_.end(), + thrust::negate()); + copy_if( + autoPolicy(inQ.faceNormal_.size()), start, end, keepFace + inP.NumTri(), + next, thrust::identity()); + } else { + copy_if( + autoPolicy(inQ.faceNormal_.size()), inQ.faceNormal_.begin(), + inQ.faceNormal_.end(), keepFace + inP.NumTri(), next, + thrust::identity()); + } + + auto newEnd = remove( + autoPolicy(sidesPerFacePQ.size()), sidesPerFacePQ.begin(), + sidesPerFacePQ.end(), 0); + Vec faceEdge(newEnd - sidesPerFacePQ.begin() + 1, 0); + inclusive_scan(autoPolicy(std::distance(sidesPerFacePQ.begin(), newEnd)), + sidesPerFacePQ.begin(), newEnd, faceEdge.begin() + 1); + outR.halfedge_.resize(faceEdge.back()); + + return std::make_tuple(faceEdge, facePQ2R); +} + +struct EdgePos { + int vert; + float edgePos; + bool isStart; +}; + +void AddNewEdgeVerts( + // we need concurrent_map because we will be adding things concurrently + concurrent_map> &edgesP, + concurrent_map, std::vector> &edgesNew, + const SparseIndices &p1q2, const Vec &i12, const Vec &v12R, + const Vec &halfedgeP, bool forward) { + ZoneScoped; + // For each edge of P that intersects a face of Q (p1q2), add this vertex to + // P's corresponding edge vector and to the two new edges, which are + // intersections between the face of Q and the two faces of P attached to the + // edge. The direction and duplicity are given by i12, while v12R remaps to + // the output vert index. When forward is false, all is reversed. 
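+  // (Descriptive note, editor's addition, derived from the loop below: each
+  // intersection vertex is appended to three lists: the original edge of P
+  // itself, edgesP[edgeP], and the two new edges keyed by (left face of P,
+  // faceQ) and (right face of P, faceQ) in edgesNew. The isStart flag is
+  // flipped between the three so the resulting edges stay consistently
+  // oriented.)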
+ auto process = [&](std::function lock, + std::function unlock, size_t i) { + const int edgeP = p1q2.Get(i, !forward); + const int faceQ = p1q2.Get(i, forward); + const int vert = v12R[i]; + const int inclusion = i12[i]; + + Halfedge halfedge = halfedgeP[edgeP]; + std::pair keyRight = {halfedgeP[halfedge.pairedHalfedge].face, + faceQ}; + if (!forward) std::swap(keyRight.first, keyRight.second); + + std::pair keyLeft = {halfedge.face, faceQ}; + if (!forward) std::swap(keyLeft.first, keyLeft.second); + + bool direction = inclusion < 0; + std::hash> pairHasher; + std::array *>, 3> edges = { + std::make_tuple(direction, std::hash{}(edgeP), &edgesP[edgeP]), + std::make_tuple(direction ^ !forward, // revert if not forward + pairHasher(keyRight), &edgesNew[keyRight]), + std::make_tuple(direction ^ forward, // revert if forward + pairHasher(keyLeft), &edgesNew[keyLeft])}; + for (const auto &tuple : edges) { + lock(std::get<1>(tuple)); + for (int j = 0; j < glm::abs(inclusion); ++j) + std::get<2>(tuple)->push_back({vert + j, 0.0f, std::get<0>(tuple)}); + unlock(std::get<1>(tuple)); + direction = !direction; + } + }; +#if MANIFOLD_PAR == 'T' && __has_include() + // parallelize operations, requires concurrent_map so we can only enable this + // with tbb + if (!ManifoldParams().deterministic && p1q2.size() > kParallelThreshold) { + // ideally we should have 1 mutex per key, but kParallelThreshold is enough + // to avoid contention for most of the cases + std::array mutexes; + static tbb::affinity_partitioner ap; + auto processFun = std::bind( + process, [&](size_t hash) { mutexes[hash % mutexes.size()].lock(); }, + [&](size_t hash) { mutexes[hash % mutexes.size()].unlock(); }, + std::placeholders::_1); + tbb::parallel_for( + tbb::blocked_range(0_z, p1q2.size(), 32), + [&](const tbb::blocked_range &range) { + for (size_t i = range.begin(); i != range.end(); i++) processFun(i); + }, + ap); + return; + } +#endif + auto processFun = std::bind( + process, [](size_t _) {}, [](size_t _) {}, std::placeholders::_1); + for (size_t i = 0; i < p1q2.size(); ++i) processFun(i); +} + +std::vector PairUp(std::vector &edgePos) { + // Pair start vertices with end vertices to form edges. The choice of pairing + // is arbitrary for the manifoldness guarantee, but must be ordered to be + // geometrically valid. If the order does not go start-end-start-end... then + // the input and output are not geometrically valid and this algorithm becomes + // a heuristic. + ASSERT(edgePos.size() % 2 == 0, topologyErr, + "Non-manifold edge! 
Not an even number of points."); + size_t nEdges = edgePos.size() / 2; + auto middle = std::partition(edgePos.begin(), edgePos.end(), + [](EdgePos x) { return x.isStart; }); + ASSERT(middle - edgePos.begin() == nEdges, topologyErr, "Non-manifold edge!"); + auto cmp = [](EdgePos a, EdgePos b) { return a.edgePos < b.edgePos; }; + std::stable_sort(edgePos.begin(), middle, cmp); + std::stable_sort(middle, edgePos.end(), cmp); + std::vector edges; + for (size_t i = 0; i < nEdges; ++i) + edges.push_back({edgePos[i].vert, edgePos[i + nEdges].vert, -1, -1}); + return edges; +} + +void AppendPartialEdges(Manifold::Impl &outR, Vec &wholeHalfedgeP, + Vec &facePtrR, + concurrent_map> &edgesP, + Vec &halfedgeRef, const Manifold::Impl &inP, + const Vec &i03, const Vec &vP2R, + const Vec::IterC faceP2R, bool forward) { + ZoneScoped; + // Each edge in the map is partially retained; for each of these, look up + // their original verts and include them based on their winding number (i03), + // while remapping them to the output using vP2R. Use the verts position + // projected along the edge vector to pair them up, then distribute these + // edges to their faces. + Vec &halfedgeR = outR.halfedge_; + const Vec &vertPosP = inP.vertPos_; + const Vec &halfedgeP = inP.halfedge_; + + for (auto &value : edgesP) { + const int edgeP = value.first; + std::vector &edgePosP = value.second; + + const Halfedge &halfedge = halfedgeP[edgeP]; + wholeHalfedgeP[edgeP] = false; + wholeHalfedgeP[halfedge.pairedHalfedge] = false; + + const int vStart = halfedge.startVert; + const int vEnd = halfedge.endVert; + const glm::vec3 edgeVec = vertPosP[vEnd] - vertPosP[vStart]; + // Fill in the edge positions of the old points. + for (EdgePos &edge : edgePosP) { + edge.edgePos = glm::dot(outR.vertPos_[edge.vert], edgeVec); + } + + int inclusion = i03[vStart]; + EdgePos edgePos = {vP2R[vStart], + glm::dot(outR.vertPos_[vP2R[vStart]], edgeVec), + inclusion > 0}; + for (int j = 0; j < glm::abs(inclusion); ++j) { + edgePosP.push_back(edgePos); + ++edgePos.vert; + } + + inclusion = i03[vEnd]; + edgePos = {vP2R[vEnd], glm::dot(outR.vertPos_[vP2R[vEnd]], edgeVec), + inclusion < 0}; + for (int j = 0; j < glm::abs(inclusion); ++j) { + edgePosP.push_back(edgePos); + ++edgePos.vert; + } + + // sort edges into start/end pairs along length + std::vector edges = PairUp(edgePosP); + + // add halfedges to result + const int faceLeftP = halfedge.face; + const int faceLeft = faceP2R[faceLeftP]; + const int faceRightP = halfedgeP[halfedge.pairedHalfedge].face; + const int faceRight = faceP2R[faceRightP]; + // Negative inclusion means the halfedges are reversed, which means our + // reference is now to the endVert instead of the startVert, which is one + // position advanced CCW. This is only valid if this is a retained vert; it + // will be ignored later if the vert is new. + const TriRef forwardRef = {forward ? 0 : 1, -1, faceLeftP}; + const TriRef backwardRef = {forward ? 
0 : 1, -1, faceRightP}; + + for (Halfedge e : edges) { + const int forwardEdge = facePtrR[faceLeft]++; + const int backwardEdge = facePtrR[faceRight]++; + + e.face = faceLeft; + e.pairedHalfedge = backwardEdge; + halfedgeR[forwardEdge] = e; + halfedgeRef[forwardEdge] = forwardRef; + + std::swap(e.startVert, e.endVert); + e.face = faceRight; + e.pairedHalfedge = forwardEdge; + halfedgeR[backwardEdge] = e; + halfedgeRef[backwardEdge] = backwardRef; + } + } +} + +void AppendNewEdges( + Manifold::Impl &outR, Vec &facePtrR, + concurrent_map, std::vector> &edgesNew, + Vec &halfedgeRef, const Vec &facePQ2R, const int numFaceP) { + ZoneScoped; + // Pair up each edge's verts and distribute to faces based on indices in key. + Vec &halfedgeR = outR.halfedge_; + Vec &vertPosR = outR.vertPos_; + + for (auto &value : edgesNew) { + const int faceP = value.first.first; + const int faceQ = value.first.second; + std::vector &edgePos = value.second; + + Box bbox; + for (auto edge : edgePos) { + bbox.Union(vertPosR[edge.vert]); + } + const glm::vec3 size = bbox.Size(); + // Order the points along their longest dimension. + const int i = (size.x > size.y && size.x > size.z) ? 0 + : size.y > size.z ? 1 + : 2; + for (auto &edge : edgePos) { + edge.edgePos = vertPosR[edge.vert][i]; + } + + // sort edges into start/end pairs along length. + std::vector edges = PairUp(edgePos); + + // add halfedges to result + const int faceLeft = facePQ2R[faceP]; + const int faceRight = facePQ2R[numFaceP + faceQ]; + const TriRef forwardRef = {0, -1, faceP}; + const TriRef backwardRef = {1, -1, faceQ}; + for (Halfedge e : edges) { + const int forwardEdge = facePtrR[faceLeft]++; + const int backwardEdge = facePtrR[faceRight]++; + + e.face = faceLeft; + e.pairedHalfedge = backwardEdge; + halfedgeR[forwardEdge] = e; + halfedgeRef[forwardEdge] = forwardRef; + + std::swap(e.startVert, e.endVert); + e.face = faceRight; + e.pairedHalfedge = forwardEdge; + halfedgeR[backwardEdge] = e; + halfedgeRef[backwardEdge] = backwardRef; + } + } +} + +struct DuplicateHalfedges { + VecView halfedgesR; + VecView halfedgeRef; + VecView facePtr; + VecView halfedgesP; + VecView i03; + VecView vP2R; + VecView faceP2R; + const bool forward; + + void operator()(thrust::tuple in) { + if (!thrust::get<0>(in)) return; + Halfedge halfedge = thrust::get<1>(in); + if (!halfedge.IsForward()) return; + + const int inclusion = i03[halfedge.startVert]; + if (inclusion == 0) return; + if (inclusion < 0) { // reverse + int tmp = halfedge.startVert; + halfedge.startVert = halfedge.endVert; + halfedge.endVert = tmp; + } + halfedge.startVert = vP2R[halfedge.startVert]; + halfedge.endVert = vP2R[halfedge.endVert]; + const int faceLeftP = halfedge.face; + halfedge.face = faceP2R[faceLeftP]; + const int faceRightP = halfedgesP[halfedge.pairedHalfedge].face; + const int faceRight = faceP2R[faceRightP]; + // Negative inclusion means the halfedges are reversed, which means our + // reference is now to the endVert instead of the startVert, which is one + // position advanced CCW. + const TriRef forwardRef = {forward ? 0 : 1, -1, faceLeftP}; + const TriRef backwardRef = {forward ? 
0 : 1, -1, faceRightP}; + + for (int i = 0; i < glm::abs(inclusion); ++i) { + int forwardEdge = AtomicAdd(facePtr[halfedge.face], 1); + int backwardEdge = AtomicAdd(facePtr[faceRight], 1); + halfedge.pairedHalfedge = backwardEdge; + + halfedgesR[forwardEdge] = halfedge; + halfedgesR[backwardEdge] = {halfedge.endVert, halfedge.startVert, + forwardEdge, faceRight}; + halfedgeRef[forwardEdge] = forwardRef; + halfedgeRef[backwardEdge] = backwardRef; + + ++halfedge.startVert; + ++halfedge.endVert; + } + } +}; + +void AppendWholeEdges(Manifold::Impl &outR, Vec &facePtrR, + Vec &halfedgeRef, const Manifold::Impl &inP, + const Vec wholeHalfedgeP, const Vec &i03, + const Vec &vP2R, VecView faceP2R, + bool forward) { + ZoneScoped; + for_each_n(ManifoldParams().deterministic ? ExecutionPolicy::Seq + : autoPolicy(inP.halfedge_.size()), + zip(wholeHalfedgeP.begin(), inP.halfedge_.begin(), countAt(0)), + inP.halfedge_.size(), + DuplicateHalfedges({outR.halfedge_, halfedgeRef, facePtrR, + inP.halfedge_, i03, vP2R, faceP2R, forward})); +} + +struct MapTriRef { + VecView triRefP; + VecView triRefQ; + const int offsetQ; + + void operator()(TriRef &triRef) { + const int tri = triRef.tri; + const bool PQ = triRef.meshID == 0; + triRef = PQ ? triRefP[tri] : triRefQ[tri]; + if (!PQ) triRef.meshID += offsetQ; + } +}; + +void UpdateReference(Manifold::Impl &outR, const Manifold::Impl &inP, + const Manifold::Impl &inQ, bool invertQ) { + const int offsetQ = Manifold::Impl::meshIDCounter_; + for_each_n( + autoPolicy(outR.NumTri()), outR.meshRelation_.triRef.begin(), + outR.NumTri(), + MapTriRef({inP.meshRelation_.triRef, inQ.meshRelation_.triRef, offsetQ})); + + for (const auto &pair : inP.meshRelation_.meshIDtransform) { + outR.meshRelation_.meshIDtransform[pair.first] = pair.second; + } + for (const auto &pair : inQ.meshRelation_.meshIDtransform) { + outR.meshRelation_.meshIDtransform[pair.first + offsetQ] = pair.second; + outR.meshRelation_.meshIDtransform[pair.first + offsetQ].backSide ^= + invertQ; + } +} + +struct Barycentric { + VecView uvw; + VecView vertPosP; + VecView vertPosQ; + VecView vertPosR; + VecView halfedgeP; + VecView halfedgeQ; + VecView halfedgeR; + const float precision; + + void operator()(thrust::tuple in) { + const int tri = thrust::get<0>(in); + const TriRef refPQ = thrust::get<1>(in); + if (halfedgeR[3 * tri].startVert < 0) return; + + const int triPQ = refPQ.tri; + const bool PQ = refPQ.meshID == 0; + const auto &vertPos = PQ ? vertPosP : vertPosQ; + const auto &halfedge = PQ ? 
halfedgeP : halfedgeQ; + + glm::mat3 triPos; + for (const int j : {0, 1, 2}) + triPos[j] = vertPos[halfedge[3 * triPQ + j].startVert]; + + for (const int i : {0, 1, 2}) { + const int vert = halfedgeR[3 * tri + i].startVert; + uvw[3 * tri + i] = GetBarycentric(vertPosR[vert], triPos, precision); + } + } +}; + +void CreateProperties(Manifold::Impl &outR, const Manifold::Impl &inP, + const Manifold::Impl &inQ) { + ZoneScoped; + const int numPropP = inP.NumProp(); + const int numPropQ = inQ.NumProp(); + const int numProp = glm::max(numPropP, numPropQ); + outR.meshRelation_.numProp = numProp; + if (numProp == 0) return; + + const int numTri = outR.NumTri(); + outR.meshRelation_.triProperties.resize(numTri); + + Vec bary(outR.halfedge_.size()); + for_each_n(autoPolicy(numTri), + zip(countAt(0), outR.meshRelation_.triRef.cbegin()), numTri, + Barycentric({bary, inP.vertPos_, inQ.vertPos_, outR.vertPos_, + inP.halfedge_, inQ.halfedge_, outR.halfedge_, + outR.precision_})); + + using Entry = std::pair; + int idMissProp = outR.NumVert(); + std::vector> propIdx(outR.NumVert() + 1); + std::vector propMissIdx[2]; + propMissIdx[0].resize(inQ.NumPropVert(), -1); + propMissIdx[1].resize(inP.NumPropVert(), -1); + + if (static_cast(outR.NumVert()) * static_cast(numProp) >= + std::numeric_limits::max()) + throw std::out_of_range("too many vertices"); + + outR.meshRelation_.properties.reserve(outR.NumVert() * numProp); + int idx = 0; + + for (int tri = 0; tri < numTri; ++tri) { + // Skip collapsed triangles + if (outR.halfedge_[3 * tri].startVert < 0) continue; + + const TriRef ref = outR.meshRelation_.triRef[tri]; + const bool PQ = ref.meshID == 0; + const int oldNumProp = PQ ? numPropP : numPropQ; + const auto &properties = + PQ ? inP.meshRelation_.properties : inQ.meshRelation_.properties; + const glm::ivec3 &triProp = oldNumProp == 0 ? glm::ivec3(-1) + : PQ ? 
inP.meshRelation_.triProperties[ref.tri] + : inQ.meshRelation_.triProperties[ref.tri]; + + for (const int i : {0, 1, 2}) { + const int vert = outR.halfedge_[3 * tri + i].startVert; + const glm::vec3 &uvw = bary[3 * tri + i]; + + glm::ivec4 key(PQ, idMissProp, -1, -1); + if (oldNumProp > 0) { + int edge = -2; + for (const int j : {0, 1, 2}) { + if (uvw[j] == 1) { + // On a retained vert, the propVert must also match + key[2] = triProp[j]; + edge = -1; + break; + } + if (uvw[j] == 0) edge = j; + } + if (edge >= 0) { + // On an edge, both propVerts must match + const int p0 = triProp[Next3(edge)]; + const int p1 = triProp[Prev3(edge)]; + key[1] = vert; + key[2] = glm::min(p0, p1); + key[3] = glm::max(p0, p1); + } else if (edge == -2) { + key[1] = vert; + } + } + + if (key.y == idMissProp && key.z >= 0) { + // only key.x/key.z matters + auto &entry = propMissIdx[key.x][key.z]; + if (entry >= 0) { + outR.meshRelation_.triProperties[tri][i] = entry; + continue; + } + entry = idx; + } else { + auto &bin = propIdx[key.y]; + bool bFound = false; + for (int k = 0; k < bin.size(); ++k) { + if (bin[k].first == glm::ivec3(key.x, key.z, key.w)) { + bFound = true; + outR.meshRelation_.triProperties[tri][i] = bin[k].second; + break; + } + } + if (bFound) continue; + bin.push_back(std::make_pair(glm::ivec3(key.x, key.z, key.w), idx)); + } + + outR.meshRelation_.triProperties[tri][i] = idx++; + for (int p = 0; p < numProp; ++p) { + if (p < oldNumProp) { + glm::vec3 oldProps; + for (const int j : {0, 1, 2}) + oldProps[j] = properties[oldNumProp * triProp[j] + p]; + outR.meshRelation_.properties.push_back(glm::dot(uvw, oldProps)); + } else { + outR.meshRelation_.properties.push_back(0); + } + } + } + } +} +} // namespace + +namespace manifold { + +Manifold::Impl Boolean3::Result(OpType op) const { +#ifdef MANIFOLD_DEBUG + Timer assemble; + assemble.Start(); +#endif + + ASSERT((expandP_ > 0) == (op == OpType::Add), logicErr, + "Result op type not compatible with constructor op type."); + const int c1 = op == OpType::Intersect ? 0 : 1; + const int c2 = op == OpType::Add ? 1 : 0; + const int c3 = op == OpType::Intersect ? 1 : -1; + + if (inP_.IsEmpty()) { + if (!inQ_.IsEmpty() && op == OpType::Add) { + return inQ_; + } + return Manifold::Impl(); + } else if (inQ_.IsEmpty()) { + if (op == OpType::Intersect) { + return Manifold::Impl(); + } + return inP_; + } + + const bool invertQ = op == OpType::Subtract; + + // Convert winding numbers to inclusion values based on operation type. 
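+  // [Editor's note] The constant table this implies, derived from the
+  // c1/c2/c3 definitions above (not upstream text); i12 = c3*x12 and
+  // i21 = c3*x21 in every case:
+  //   op         c1  c2  c3   i03 = c1 + c3*w03   i30 = c2 + c3*w30
+  //   Intersect   0   0   1        w03                 w30
+  //   Add         1   1  -1      1 - w03             1 - w30
+  //   Subtract    1   0  -1      1 - w03              -w30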
+ Vec i12(x12_.size()); + Vec i21(x21_.size()); + Vec i03(w03_.size()); + Vec i30(w30_.size()); + + transform(autoPolicy(x12_.size()), x12_.begin(), x12_.end(), i12.begin(), + c3 * _1); + transform(autoPolicy(x21_.size()), x21_.begin(), x21_.end(), i21.begin(), + c3 * _1); + transform(autoPolicy(w03_.size()), w03_.begin(), w03_.end(), i03.begin(), + c1 + c3 * _1); + transform(autoPolicy(w30_.size()), w30_.begin(), w30_.end(), i30.begin(), + c2 + c3 * _1); + + Vec vP2R(inP_.NumVert()); + exclusive_scan(autoPolicy(i03.size()), i03.begin(), i03.end(), vP2R.begin(), + 0, AbsSum()); + int numVertR = AbsSum()(vP2R.back(), i03.back()); + const int nPv = numVertR; + + Vec vQ2R(inQ_.NumVert()); + exclusive_scan(autoPolicy(i30.size()), i30.begin(), i30.end(), vQ2R.begin(), + numVertR, AbsSum()); + numVertR = AbsSum()(vQ2R.back(), i30.back()); + const int nQv = numVertR - nPv; + + Vec v12R(v12_.size()); + if (v12_.size() > 0) { + exclusive_scan(autoPolicy(i12.size()), i12.begin(), i12.end(), v12R.begin(), + numVertR, AbsSum()); + numVertR = AbsSum()(v12R.back(), i12.back()); + } + const int n12 = numVertR - nPv - nQv; + + Vec v21R(v21_.size()); + if (v21_.size() > 0) { + exclusive_scan(autoPolicy(i21.size()), i21.begin(), i21.end(), v21R.begin(), + numVertR, AbsSum()); + numVertR = AbsSum()(v21R.back(), i21.back()); + } + const int n21 = numVertR - nPv - nQv - n12; + + // Create the output Manifold + Manifold::Impl outR; + + if (numVertR == 0) return outR; + + outR.precision_ = glm::max(inP_.precision_, inQ_.precision_); + + outR.vertPos_.resize(numVertR); + // Add vertices, duplicating for inclusion numbers not in [-1, 1]. + // Retained vertices from P and Q: + for_each_n(autoPolicy(inP_.NumVert()), + zip(i03.begin(), vP2R.begin(), inP_.vertPos_.begin()), + inP_.NumVert(), DuplicateVerts({outR.vertPos_})); + for_each_n(autoPolicy(inQ_.NumVert()), + zip(i30.begin(), vQ2R.begin(), inQ_.vertPos_.begin()), + inQ_.NumVert(), DuplicateVerts({outR.vertPos_})); + // New vertices created from intersections: + for_each_n(autoPolicy(i12.size()), + zip(i12.begin(), v12R.begin(), v12_.begin()), i12.size(), + DuplicateVerts({outR.vertPos_})); + for_each_n(autoPolicy(i21.size()), + zip(i21.begin(), v21R.begin(), v21_.begin()), i21.size(), + DuplicateVerts({outR.vertPos_})); + + PRINT(nPv << " verts from inP"); + PRINT(nQv << " verts from inQ"); + PRINT(n12 << " new verts from edgesP -> facesQ"); + PRINT(n21 << " new verts from facesP -> edgesQ"); + + // Build up new polygonal faces from triangle intersections. At this point the + // calculation switches from parallel to serial. + + // Level 3 + + // This key is the forward halfedge index of P or Q. Only includes intersected + // edges. + concurrent_map> edgesP, edgesQ; + // This key is the face index of + concurrent_map, std::vector> edgesNew; + + AddNewEdgeVerts(edgesP, edgesNew, p1q2_, i12, v12R, inP_.halfedge_, true); + AddNewEdgeVerts(edgesQ, edgesNew, p2q1_, i21, v21R, inQ_.halfedge_, false); + + // Level 4 + Vec faceEdge; + Vec facePQ2R; + std::tie(faceEdge, facePQ2R) = + SizeOutput(outR, inP_, inQ_, i03, i30, i12, i21, p1q2_, p2q1_, invertQ); + + // This gets incremented for each halfedge that's added to a face so that the + // next one knows where to slot in. + Vec facePtrR = faceEdge; + // Intersected halfedges are marked false. + Vec wholeHalfedgeP(inP_.halfedge_.size(), true); + Vec wholeHalfedgeQ(inQ_.halfedge_.size(), true); + // The halfedgeRef contains the data that will become triRef once the faces + // are triangulated. 
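+  // [Editor's note] Concretely (from the TriRef constructions in the helpers
+  // above): each entry records {meshID: 0 for P / 1 for Q, -1, original face
+  // index}, and Face2Tri() later converts these per-halfedge refs into
+  // per-triangle triRef entries.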
+ Vec halfedgeRef(2 * outR.NumEdge()); + + AppendPartialEdges(outR, wholeHalfedgeP, facePtrR, edgesP, halfedgeRef, inP_, + i03, vP2R, facePQ2R.begin(), true); + AppendPartialEdges(outR, wholeHalfedgeQ, facePtrR, edgesQ, halfedgeRef, inQ_, + i30, vQ2R, facePQ2R.begin() + inP_.NumTri(), false); + + AppendNewEdges(outR, facePtrR, edgesNew, halfedgeRef, facePQ2R, + inP_.NumTri()); + + AppendWholeEdges(outR, facePtrR, halfedgeRef, inP_, wholeHalfedgeP, i03, vP2R, + facePQ2R.cview(0, inP_.NumTri()), true); + AppendWholeEdges(outR, facePtrR, halfedgeRef, inQ_, wholeHalfedgeQ, i30, vQ2R, + facePQ2R.cview(inP_.NumTri(), inQ_.NumTri()), false); + +#ifdef MANIFOLD_DEBUG + assemble.Stop(); + Timer triangulate; + triangulate.Start(); +#endif + + // Level 6 + + if (ManifoldParams().intermediateChecks) + ASSERT(outR.IsManifold(), logicErr, "polygon mesh is not manifold!"); + + outR.Face2Tri(faceEdge, halfedgeRef); + +#ifdef MANIFOLD_DEBUG + triangulate.Stop(); + Timer simplify; + simplify.Start(); +#endif + + if (ManifoldParams().intermediateChecks) + ASSERT(outR.IsManifold(), logicErr, "triangulated mesh is not manifold!"); + + CreateProperties(outR, inP_, inQ_); + + UpdateReference(outR, inP_, inQ_, invertQ); + + outR.SimplifyTopology(); + + if (ManifoldParams().intermediateChecks) + ASSERT(outR.Is2Manifold(), logicErr, "simplified mesh is not 2-manifold!"); + +#ifdef MANIFOLD_DEBUG + simplify.Stop(); + Timer sort; + sort.Start(); +#endif + + outR.Finish(); + outR.IncrementMeshIDs(); + +#ifdef MANIFOLD_DEBUG + sort.Stop(); + if (ManifoldParams().verbose) { + assemble.Print("Assembly"); + triangulate.Print("Triangulation"); + simplify.Print("Simplification"); + sort.Print("Sorting"); + std::cout << outR.NumVert() << " verts and " << outR.NumTri() << " tris" + << std::endl; + } +#endif + + return outR; +} + +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/constructors.cpp b/thirdparty/manifold/src/manifold/src/constructors.cpp new file mode 100644 index 000000000000..0350201661e9 --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/constructors.cpp @@ -0,0 +1,518 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
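+// [Editor's note] Minimal usage sketch tying the constructors in this file to
+// the boolean pipeline above (illustrative only; assumes the public manifold.h
+// API of this vendored library):
+#if 0
+#include "manifold.h"
+using namespace manifold;
+int main() {
+  Manifold box = Manifold::Cube({1.0f, 1.0f, 1.0f});
+  Manifold ball = Manifold::Sphere(0.6f, 32).Translate({0.5f, 0.5f, 0.5f});
+  // operator- routes through Boolean3::Result(OpType::Subtract) above.
+  Manifold diff = box - ball;
+}
+#endif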
+
+#include <algorithm>
+
+#include "cross_section.h"
+#include "csg_tree.h"
+#include "impl.h"
+#include "par.h"
+#include "polygon.h"
+
+namespace {
+using namespace manifold;
+using namespace thrust::placeholders;
+
+struct ToSphere {
+  float length;
+  void operator()(glm::vec3& v) {
+    v = glm::cos(glm::half_pi<float>() * (1.0f - v));
+    v = length * glm::normalize(v);
+    if (isnan(v.x)) v = glm::vec3(0.0);
+  }
+};
+
+struct Equals {
+  int val;
+  bool operator()(int x) { return x == val; }
+};
+
+struct RemoveFace {
+  VecView<const Halfedge> halfedge;
+  VecView<const int> vertLabel;
+  const int keepLabel;
+
+  bool operator()(int face) {
+    return vertLabel[halfedge[3 * face].startVert] != keepLabel;
+  }
+};
+}  // namespace
+
+namespace manifold {
+/**
+ * Constructs a smooth version of the input mesh by creating tangents; this
+ * method will throw if you have supplied tangents with your mesh already. The
+ * actual triangle resolution is unchanged; use the Refine() method to
+ * interpolate to a higher-resolution curve.
+ *
+ * By default, every edge is calculated for maximum smoothness (very much
+ * approximately), attempting to minimize the maximum mean Curvature magnitude.
+ * No higher-order derivatives are considered, as the interpolation is
+ * independent per triangle, only sharing constraints on their boundaries.
+ *
+ * @param meshGL input MeshGL.
+ * @param sharpenedEdges If desired, you can supply a vector of sharpened
+ * halfedges, which should in general be a small subset of all halfedges. Order
+ * of entries doesn't matter, as each one specifies the desired smoothness
+ * (between zero and one, with one the default for all unspecified halfedges)
+ * and the halfedge index (3 * triangle index + [0,1,2] where 0 is the edge
+ * between triVert 0 and 1, etc).
+ *
+ * At a smoothness value of zero, a sharp crease is made. The smoothness is
+ * interpolated along each edge, so the specified value should be thought of as
+ * an average. Where exactly two sharpened edges meet at a vertex, their
+ * tangents are rotated to be colinear so that the sharpened edge can be
+ * continuous. Vertices with only one sharpened edge are completely smooth,
+ * allowing sharpened edges to smoothly vanish at termination. A single vertex
+ * can be sharpened by sharpening all edges that are incident on it, allowing
+ * cones to be formed.
+ */
+Manifold Manifold::Smooth(const MeshGL& meshGL,
+                          const std::vector<Smoothness>& sharpenedEdges) {
+  ASSERT(meshGL.halfedgeTangent.empty(), std::runtime_error,
+         "when supplying tangents, the normal constructor should be used "
+         "rather than Smooth().");
+
+  // Don't allow any triangle merging.
+  std::vector<float> propertyTolerance(meshGL.numProp - 3, -1);
+  std::shared_ptr<Impl> impl =
+      std::make_shared<Impl>(meshGL, propertyTolerance);
+  impl->CreateTangents(impl->UpdateSharpenedEdges(sharpenedEdges));
+  return Manifold(impl);
+}
+
+/**
+ * Constructs a smooth version of the input mesh by creating tangents; this
+ * method will throw if you have supplied tangents with your mesh already. The
+ * actual triangle resolution is unchanged; use the Refine() method to
+ * interpolate to a higher-resolution curve.
+ *
+ * By default, every edge is calculated for maximum smoothness (very much
+ * approximately), attempting to minimize the maximum mean Curvature magnitude.
+ * No higher-order derivatives are considered, as the interpolation is
+ * independent per triangle, only sharing constraints on their boundaries.
+ *
+ * @param mesh input Mesh.
+ * @param sharpenedEdges If desired, you can supply a vector of sharpened
+ * halfedges, which should in general be a small subset of all halfedges. Order
+ * of entries doesn't matter, as each one specifies the desired smoothness
+ * (between zero and one, with one the default for all unspecified halfedges)
+ * and the halfedge index (3 * triangle index + [0,1,2] where 0 is the edge
+ * between triVert 0 and 1, etc).
+ *
+ * At a smoothness value of zero, a sharp crease is made. The smoothness is
+ * interpolated along each edge, so the specified value should be thought of as
+ * an average. Where exactly two sharpened edges meet at a vertex, their
+ * tangents are rotated to be colinear so that the sharpened edge can be
+ * continuous. Vertices with only one sharpened edge are completely smooth,
+ * allowing sharpened edges to smoothly vanish at termination. A single vertex
+ * can be sharpened by sharpening all edges that are incident on it, allowing
+ * cones to be formed.
+ */
+Manifold Manifold::Smooth(const Mesh& mesh,
+                          const std::vector<Smoothness>& sharpenedEdges) {
+  ASSERT(mesh.halfedgeTangent.empty(), std::runtime_error,
+         "when supplying tangents, the normal constructor should be used "
+         "rather than Smooth().");
+
+  Impl::MeshRelationD relation = {(int)ReserveIDs(1)};
+  std::shared_ptr<Impl> impl = std::make_shared<Impl>(mesh, relation);
+  impl->CreateTangents(impl->UpdateSharpenedEdges(sharpenedEdges));
+  return Manifold(impl);
+}
+
+/**
+ * Constructs a tetrahedron centered at the origin with one vertex at (1,1,1)
+ * and the rest at similarly symmetric points.
+ */
+Manifold Manifold::Tetrahedron() {
+  return Manifold(std::make_shared<Impl>(Impl::Shape::Tetrahedron));
+}
+
+/**
+ * Constructs a unit cube (edge lengths all one), by default in the first
+ * octant, touching the origin. If any dimensions in size are negative, or if
+ * all are zero, an empty Manifold will be returned.
+ *
+ * @param size The X, Y, and Z dimensions of the box.
+ * @param center Set to true to shift the center to the origin.
+ */
+Manifold Manifold::Cube(glm::vec3 size, bool center) {
+  if (size.x < 0.0f || size.y < 0.0f || size.z < 0.0f ||
+      glm::length(size) == 0.) {
+    return Invalid();
+  }
+  glm::mat4x3 m =
+      glm::translate(center ? (-size / 2.0f) : glm::vec3(0)) * glm::scale(size);
+  return Manifold(std::make_shared<Impl>(Manifold::Impl::Shape::Cube, m));
+}
+
+/**
+ * A convenience constructor for the common case of extruding a circle. Can
+ * also form cones if both radii are specified.
+ *
+ * @param height Z-extent
+ * @param radiusLow Radius of bottom circle. Must be positive.
+ * @param radiusHigh Radius of top circle. Can equal zero. Default is equal to
+ * radiusLow.
+ * @param circularSegments How many line segments to use around the circle.
+ * Default is calculated by the static Defaults.
+ * @param center Set to true to shift the center to the origin. Default is
+ * origin at the bottom.
+ */
+Manifold Manifold::Cylinder(float height, float radiusLow, float radiusHigh,
+                            int circularSegments, bool center) {
+  if (height <= 0.0f || radiusLow <= 0.0f) {
+    return Invalid();
+  }
+  float scale = radiusHigh >= 0.0f ? radiusHigh / radiusLow : 1.0f;
+  float radius = fmax(radiusLow, radiusHigh);
+  int n = circularSegments > 2 ? circularSegments
+                               : Quality::GetCircularSegments(radius);
+
+  CrossSection circle = CrossSection::Circle(radiusLow, n);
+  Manifold cylinder =
+      Manifold::Extrude(circle, height, 0, 0.0f, glm::vec2(scale));
+  if (center)
+    cylinder =
+        cylinder.Translate(glm::vec3(0.0f, 0.0f, -height / 2.0f)).AsOriginal();
+  return cylinder;
+}
+
+/**
+ * Constructs a geodesic sphere of a given radius.
+ *
+ * @param radius Radius of the sphere. Must be positive.
+ * @param circularSegments Number of segments along its
+ * diameter. This number will always be rounded up to the nearest factor of
+ * four, as this sphere is constructed by refining an octahedron. This means
+ * there is a circle of vertices in each of the three axis planes. Default is
+ * calculated by the static Defaults.
+ */
+Manifold Manifold::Sphere(float radius, int circularSegments) {
+  if (radius <= 0.0f) {
+    return Invalid();
+  }
+  int n = circularSegments > 0 ? (circularSegments + 3) / 4
+                               : Quality::GetCircularSegments(radius) / 4;
+  auto pImpl_ = std::make_shared<Impl>(Impl::Shape::Octahedron);
+  pImpl_->Subdivide([n](glm::vec3 edge) { return n - 1; });
+  for_each_n(autoPolicy(pImpl_->NumVert()), pImpl_->vertPos_.begin(),
+             pImpl_->NumVert(), ToSphere({radius}));
+  pImpl_->Finish();
+  // Ignore preceding octahedron.
+  pImpl_->InitializeOriginal();
+  return Manifold(pImpl_);
+}
+
+/**
+ * Constructs a manifold from a set of polygons by extruding them along the
+ * Z-axis.
+ * Note that high twistDegrees with small nDivisions may cause
+ * self-intersection. This is not checked here and it is up to the user to
+ * choose the correct parameters.
+ *
+ * @param crossSection A set of non-overlapping polygons to extrude.
+ * @param height Z-extent of extrusion.
+ * @param nDivisions Number of extra copies of the crossSection to insert into
+ * the shape vertically; especially useful in combination with twistDegrees to
+ * avoid interpolation artifacts. Default is none.
+ * @param twistDegrees Amount to twist the top crossSection relative to the
+ * bottom, interpolated linearly for the divisions in between.
+ * @param scaleTop Amount to scale the top (independently in X and Y). If the
+ * scale is {0, 0}, a pure cone is formed with only a single vertex at the top.
+ * Note that scale is applied after twist.
+ * Default {1, 1}.
+ */ +Manifold Manifold::Extrude(const CrossSection& crossSection, float height, + int nDivisions, float twistDegrees, + glm::vec2 scaleTop) { + ZoneScoped; + auto polygons = crossSection.ToPolygons(); + if (polygons.size() == 0 || height <= 0.0f) { + return Invalid(); + } + + scaleTop.x = glm::max(scaleTop.x, 0.0f); + scaleTop.y = glm::max(scaleTop.y, 0.0f); + + auto pImpl_ = std::make_shared(); + ++nDivisions; + auto& vertPos = pImpl_->vertPos_; + Vec triVertsDH; + auto& triVerts = triVertsDH; + int nCrossSection = 0; + bool isCone = scaleTop.x == 0.0 && scaleTop.y == 0.0; + int idx = 0; + PolygonsIdx polygonsIndexed; + for (auto& poly : polygons) { + nCrossSection += poly.size(); + SimplePolygonIdx simpleIndexed; + for (const glm::vec2& polyVert : poly) { + vertPos.push_back({polyVert.x, polyVert.y, 0.0f}); + simpleIndexed.push_back({polyVert, idx++}); + } + polygonsIndexed.push_back(simpleIndexed); + } + for (int i = 1; i < nDivisions + 1; ++i) { + float alpha = i / float(nDivisions); + float phi = alpha * twistDegrees; + glm::vec2 scale = glm::mix(glm::vec2(1.0f), scaleTop, alpha); + glm::mat2 rotation(cosd(phi), sind(phi), -sind(phi), cosd(phi)); + glm::mat2 transform = glm::mat2(scale.x, 0.0f, 0.0f, scale.y) * rotation; + int j = 0; + int idx = 0; + for (const auto& poly : polygons) { + for (int vert = 0; vert < poly.size(); ++vert) { + int offset = idx + nCrossSection * i; + int thisVert = vert + offset; + int lastVert = (vert == 0 ? poly.size() : vert) - 1 + offset; + if (i == nDivisions && isCone) { + triVerts.push_back({nCrossSection * i + j, lastVert - nCrossSection, + thisVert - nCrossSection}); + } else { + glm::vec2 pos = transform * poly[vert]; + vertPos.push_back({pos.x, pos.y, height * alpha}); + triVerts.push_back({thisVert, lastVert, thisVert - nCrossSection}); + triVerts.push_back( + {lastVert, lastVert - nCrossSection, thisVert - nCrossSection}); + } + } + ++j; + idx += poly.size(); + } + } + if (isCone) + for (int j = 0; j < polygons.size(); ++j) // Duplicate vertex for Genus + vertPos.push_back({0.0f, 0.0f, height}); + std::vector top = TriangulateIdx(polygonsIndexed); + for (const glm::ivec3& tri : top) { + triVerts.push_back({tri[0], tri[2], tri[1]}); + if (!isCone) triVerts.push_back(tri + nCrossSection * nDivisions); + } + + pImpl_->CreateHalfedges(triVertsDH); + pImpl_->Finish(); + pImpl_->meshRelation_.originalID = ReserveIDs(1); + pImpl_->InitializeOriginal(); + pImpl_->CreateFaces(); + return Manifold(pImpl_); +} + +/** + * Constructs a manifold from a set of polygons by revolving this cross-section + * around its Y-axis and then setting this as the Z-axis of the resulting + * manifold. If the polygons cross the Y-axis, only the part on the positive X + * side is used. Geometrically valid input will result in geometrically valid + * output. + * + * @param crossSection A set of non-overlapping polygons to revolve. + * @param circularSegments Number of segments along its diameter. Default is + * calculated by the static Defaults. + * @param revolveDegrees Number of degrees to revolve. Default is 360 degrees. + */ +Manifold Manifold::Revolve(const CrossSection& crossSection, + int circularSegments, float revolveDegrees) { + ZoneScoped; + Polygons polygons = crossSection.ToPolygons(); + + if (polygons.size() == 0) { + return Invalid(); + } + + const Rect bounds = crossSection.Bounds(); + const float radius = bounds.max.x; + + if (radius <= 0) { + return Invalid(); + } else if (bounds.min.x < 0) { + // Take the x>=0 slice. 
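+    // [Editor's note] The '^' below is CrossSection's intersection operator;
+    // intersecting with this x >= 0 rectangle discards the part of the
+    // profile on the negative-X side of the revolve axis (clarifying note,
+    // not upstream text).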
+ glm::vec2 min = bounds.min; + glm::vec2 max = bounds.max; + CrossSection posBoundingBox = CrossSection( + {{0.0, min.y}, {max.x, min.y}, {max.x, max.y}, {0.0, max.y}}); + + polygons = (crossSection ^ posBoundingBox).ToPolygons(); + } + + if (revolveDegrees > 360.0f) { + revolveDegrees = 360.0f; + } + const bool isFullRevolution = revolveDegrees == 360.0f; + + const int nDivisions = + circularSegments > 2 + ? circularSegments + : Quality::GetCircularSegments(radius) * revolveDegrees / 360; + + auto pImpl_ = std::make_shared(); + auto& vertPos = pImpl_->vertPos_; + Vec triVertsDH; + auto& triVerts = triVertsDH; + + std::vector startPoses; + std::vector endPoses; + + const float dPhi = revolveDegrees / nDivisions; + // first and last slice are distinguished if not a full revolution. + const int nSlices = isFullRevolution ? nDivisions : nDivisions + 1; + + for (const auto& poly : polygons) { + std::size_t nPosVerts = 0; + std::size_t nRevolveAxisVerts = 0; + for (auto& pt : poly) { + if (pt.x > 0) { + nPosVerts++; + } else { + nRevolveAxisVerts++; + } + } + + for (int polyVert = 0; polyVert < poly.size(); ++polyVert) { + const int startPosIndex = vertPos.size(); + + if (!isFullRevolution) startPoses.push_back(startPosIndex); + + const glm::vec2 currPolyVertex = poly[polyVert]; + const glm::vec2 prevPolyVertex = + poly[polyVert == 0 ? poly.size() - 1 : polyVert - 1]; + + const int prevStartPosIndex = + startPosIndex + + (polyVert == 0 ? nRevolveAxisVerts + (nSlices * nPosVerts) : 0) + + (prevPolyVertex.x == 0.0 ? -1 : -nSlices); + + for (int slice = 0; slice < nSlices; ++slice) { + const float phi = slice * dPhi; + if (slice == 0 || currPolyVertex.x > 0) { + vertPos.push_back({currPolyVertex.x * cosd(phi), + currPolyVertex.x * sind(phi), currPolyVertex.y}); + } + + if (isFullRevolution || slice > 0) { + const int lastSlice = (slice == 0 ? nDivisions : slice) - 1; + if (currPolyVertex.x > 0.0) { + triVerts.push_back( + {startPosIndex + slice, startPosIndex + lastSlice, + // "Reuse" vertex of first slice if it lies on the revolve axis + (prevPolyVertex.x == 0.0 ? prevStartPosIndex + : prevStartPosIndex + lastSlice)}); + } + + if (prevPolyVertex.x > 0.0) { + triVerts.push_back( + {prevStartPosIndex + lastSlice, prevStartPosIndex + slice, + (currPolyVertex.x == 0.0 ? startPosIndex + : startPosIndex + slice)}); + } + } + } + if (!isFullRevolution) endPoses.push_back(vertPos.size() - 1); + } + } + + // Add front and back triangles if not a full revolution. + if (!isFullRevolution) { + std::vector frontTriangles = + Triangulate(polygons, pImpl_->precision_); + for (auto& t : frontTriangles) { + triVerts.push_back({startPoses[t.x], startPoses[t.y], startPoses[t.z]}); + } + + for (auto& t : frontTriangles) { + triVerts.push_back({endPoses[t.z], endPoses[t.y], endPoses[t.x]}); + } + } + + pImpl_->CreateHalfedges(triVertsDH); + pImpl_->Finish(); + pImpl_->meshRelation_.originalID = ReserveIDs(1); + pImpl_->InitializeOriginal(); + pImpl_->CreateFaces(); + return Manifold(pImpl_); +} + +/** + * Constructs a new manifold from a vector of other manifolds. This is a purely + * topological operation, so care should be taken to avoid creating + * overlapping results. It is the inverse operation of Decompose(). + * + * @param manifolds A vector of Manifolds to lazy-union together. 
+ */ +Manifold Manifold::Compose(const std::vector& manifolds) { + std::vector> children; + for (const auto& manifold : manifolds) { + children.push_back(manifold.pNode_->ToLeafNode()); + } + return Manifold(std::make_shared(CsgLeafNode::Compose(children))); +} + +/** + * This operation returns a vector of Manifolds that are topologically + * disconnected. If everything is connected, the vector is length one, + * containing a copy of the original. It is the inverse operation of Compose(). + */ +std::vector Manifold::Decompose() const { + ZoneScoped; + UnionFind<> uf(NumVert()); + // Graph graph; + auto pImpl_ = GetCsgLeafNode().GetImpl(); + for (const Halfedge& halfedge : pImpl_->halfedge_) { + if (halfedge.IsForward()) uf.unionXY(halfedge.startVert, halfedge.endVert); + } + std::vector componentIndices; + const int numComponents = uf.connectedComponents(componentIndices); + + if (numComponents == 1) { + std::vector meshes(1); + meshes[0] = *this; + return meshes; + } + Vec vertLabel(componentIndices); + + std::vector meshes; + for (int i = 0; i < numComponents; ++i) { + auto impl = std::make_shared(); + // inherit original object's precision + impl->precision_ = pImpl_->precision_; + impl->vertPos_.resize(NumVert()); + Vec vertNew2Old(NumVert()); + auto policy = autoPolicy(NumVert()); + auto start = zip(impl->vertPos_.begin(), vertNew2Old.begin()); + int nVert = + copy_if( + policy, zip(pImpl_->vertPos_.begin(), countAt(0)), + zip(pImpl_->vertPos_.end(), countAt(NumVert())), vertLabel.begin(), + zip(impl->vertPos_.begin(), vertNew2Old.begin()), Equals({i})) - + start; + impl->vertPos_.resize(nVert); + + Vec faceNew2Old(NumTri()); + sequence(policy, faceNew2Old.begin(), faceNew2Old.end()); + + int nFace = remove_if( + policy, faceNew2Old.begin(), faceNew2Old.end(), + RemoveFace({pImpl_->halfedge_, vertLabel, i})) - + faceNew2Old.begin(); + faceNew2Old.resize(nFace); + + impl->GatherFaces(*pImpl_, faceNew2Old); + impl->ReindexVerts(vertNew2Old, pImpl_->NumVert()); + impl->Finish(); + + meshes.push_back(Manifold(impl)); + } + return meshes; +} +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/csg_tree.cpp b/thirdparty/manifold/src/manifold/src/csg_tree.cpp new file mode 100644 index 000000000000..f6c994472dd3 --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/csg_tree.cpp @@ -0,0 +1,651 @@ +// Copyright 2022 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
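+// [Editor's note] Round-trip sketch for the Compose()/Decompose() pair above
+// (illustrative only; 'shape' is a hypothetical Manifold):
+#if 0
+std::vector<Manifold> parts = shape.Decompose();
+Manifold rejoined = Manifold::Compose(parts);  // purely topological inverse
+#endif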
+
+#if MANIFOLD_PAR == 'T' && __has_include(<tbb/tbb.h>)
+#include <tbb/tbb.h>
+#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
+#include <tbb/concurrent_priority_queue.h>
+#endif
+
+#include <algorithm>
+#include <variant>
+
+#include "boolean3.h"
+#include "csg_tree.h"
+#include "impl.h"
+#include "mesh_fixes.h"
+#include "par.h"
+
+constexpr int kParallelThreshold = 4096;
+
+namespace {
+using namespace manifold;
+struct Transform4x3 {
+  const glm::mat4x3 transform;
+
+  glm::vec3 operator()(glm::vec3 position) {
+    return transform * glm::vec4(position, 1.0f);
+  }
+};
+
+struct UpdateHalfedge {
+  const int nextVert;
+  const int nextEdge;
+  const int nextFace;
+
+  Halfedge operator()(Halfedge edge) {
+    edge.startVert += nextVert;
+    edge.endVert += nextVert;
+    edge.pairedHalfedge += nextEdge;
+    edge.face += nextFace;
+    return edge;
+  }
+};
+
+struct UpdateTriProp {
+  const int nextProp;
+
+  glm::ivec3 operator()(glm::ivec3 tri) {
+    tri += nextProp;
+    return tri;
+  }
+};
+
+struct UpdateMeshIDs {
+  const int offset;
+
+  TriRef operator()(TriRef ref) {
+    ref.meshID += offset;
+    return ref;
+  }
+};
+
+struct CheckOverlap {
+  VecView<const Box> boxes;
+  const size_t i;
+  bool operator()(int j) { return boxes[i].DoesOverlap(boxes[j]); }
+};
+
+using SharedImpl = std::variant<std::shared_ptr<const Manifold::Impl>,
+                                std::shared_ptr<Manifold::Impl>>;
+struct GetImplPtr {
+  const Manifold::Impl *operator()(const SharedImpl &p) {
+    if (std::holds_alternative<std::shared_ptr<const Manifold::Impl>>(p)) {
+      return std::get_if<std::shared_ptr<const Manifold::Impl>>(&p)->get();
+    } else {
+      return std::get_if<std::shared_ptr<Manifold::Impl>>(&p)->get();
+    }
+  };
+};
+
+struct MeshCompare {
+  bool operator()(const SharedImpl &a, const SharedImpl &b) {
+    return GetImplPtr()(a)->NumVert() < GetImplPtr()(b)->NumVert();
+  }
+};
+
+}  // namespace
+namespace manifold {
+
+std::shared_ptr<CsgNode> CsgNode::Boolean(
+    const std::shared_ptr<CsgNode> &second, OpType op) {
+  if (auto opNode = std::dynamic_pointer_cast<CsgOpNode>(second)) {
+    // "this" is not a CsgOpNode (which overrides Boolean), but if "second" is
+    // and the operation is commutative, we let it build the tree.
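+    // [Editor's note] e.g. leaf + (a + b) is re-dispatched below as
+    // (a + b) + leaf, so the existing op node can absorb the leaf into one
+    // n-ary union instead of growing a deeper binary tree (sketch of intent,
+    // not upstream text).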
+ if ((op == OpType::Add || op == OpType::Intersect)) { + return opNode->Boolean(shared_from_this(), op); + } + } + std::vector> children({shared_from_this(), second}); + return std::make_shared(children, op); +} + +std::shared_ptr CsgNode::Translate(const glm::vec3 &t) const { + glm::mat4x3 transform(1.0f); + transform[3] += t; + return Transform(transform); +} + +std::shared_ptr CsgNode::Scale(const glm::vec3 &v) const { + glm::mat4x3 transform(1.0f); + for (int i : {0, 1, 2}) transform[i] *= v; + return Transform(transform); +} + +std::shared_ptr CsgNode::Rotate(float xDegrees, float yDegrees, + float zDegrees) const { + glm::mat3 rX(1.0f, 0.0f, 0.0f, // + 0.0f, cosd(xDegrees), sind(xDegrees), // + 0.0f, -sind(xDegrees), cosd(xDegrees)); + glm::mat3 rY(cosd(yDegrees), 0.0f, -sind(yDegrees), // + 0.0f, 1.0f, 0.0f, // + sind(yDegrees), 0.0f, cosd(yDegrees)); + glm::mat3 rZ(cosd(zDegrees), sind(zDegrees), 0.0f, // + -sind(zDegrees), cosd(zDegrees), 0.0f, // + 0.0f, 0.0f, 1.0f); + glm::mat3 result = rZ * rY * rX; + glm::mat4x3 transform(result[0], result[1], result[2], glm::vec3(0.0f, 0.0f, 0.0f)); + return Transform(transform); +} + +CsgLeafNode::CsgLeafNode() : pImpl_(std::make_shared()) {} + +CsgLeafNode::CsgLeafNode(std::shared_ptr pImpl_) + : pImpl_(pImpl_) {} + +CsgLeafNode::CsgLeafNode(std::shared_ptr pImpl_, + glm::mat4x3 transform_) + : pImpl_(pImpl_), transform_(transform_) {} + +std::shared_ptr CsgLeafNode::GetImpl() const { + if (transform_ == glm::mat4x3(1.0f)) return pImpl_; + pImpl_ = + std::make_shared(pImpl_->Transform(transform_)); + transform_ = glm::mat4x3(1.0f); + return pImpl_; +} + +glm::mat4x3 CsgLeafNode::GetTransform() const { return transform_; } + +std::shared_ptr CsgLeafNode::ToLeafNode() const { + return std::make_shared(*this); +} + +std::shared_ptr CsgLeafNode::Transform(const glm::mat4x3 &m) const { + return std::make_shared(pImpl_, m * glm::mat4(transform_)); +} + +CsgNodeType CsgLeafNode::GetNodeType() const { return CsgNodeType::Leaf; } + +/** + * Efficient union of a set of pairwise disjoint meshes. + */ +Manifold::Impl CsgLeafNode::Compose( + const std::vector> &nodes) { + ZoneScoped; + float precision = -1; + int numVert = 0; + int numEdge = 0; + int numTri = 0; + int numPropVert = 0; + std::vector vertIndices; + std::vector edgeIndices; + std::vector triIndices; + std::vector propVertIndices; + int numPropOut = 0; + for (auto &node : nodes) { + float nodeOldScale = node->pImpl_->bBox_.Scale(); + float nodeNewScale = + node->pImpl_->bBox_.Transform(node->transform_).Scale(); + float nodePrecision = node->pImpl_->precision_; + nodePrecision *= glm::max(1.0f, nodeNewScale / nodeOldScale); + nodePrecision = glm::max(nodePrecision, kTolerance * nodeNewScale); + if (!glm::isfinite(nodePrecision)) nodePrecision = -1; + precision = glm::max(precision, nodePrecision); + + vertIndices.push_back(numVert); + edgeIndices.push_back(numEdge * 2); + triIndices.push_back(numTri); + propVertIndices.push_back(numPropVert); + numVert += node->pImpl_->NumVert(); + numEdge += node->pImpl_->NumEdge(); + numTri += node->pImpl_->NumTri(); + const int numProp = node->pImpl_->NumProp(); + numPropOut = glm::max(numPropOut, numProp); + numPropVert += + numProp == 0 ? 
1 + : node->pImpl_->meshRelation_.properties.size() / numProp; + } + + Manifold::Impl combined; + combined.precision_ = precision; + combined.vertPos_.resize(numVert); + combined.halfedge_.resize(2 * numEdge); + combined.faceNormal_.resize(numTri); + combined.halfedgeTangent_.resize(2 * numEdge); + combined.meshRelation_.triRef.resize(numTri); + if (numPropOut > 0) { + combined.meshRelation_.numProp = numPropOut; + combined.meshRelation_.properties.resize(numPropOut * numPropVert, 0); + combined.meshRelation_.triProperties.resize(numTri); + } + auto policy = autoPolicy(numTri); + + // if we are already parallelizing for each node, do not perform multithreaded + // copying as it will slightly hurt performance + if (nodes.size() > 1 && policy == ExecutionPolicy::Par) + policy = ExecutionPolicy::Seq; + + for_each_n( + nodes.size() > 1 ? ExecutionPolicy::Par : ExecutionPolicy::Seq, + countAt(0), nodes.size(), + [&nodes, &vertIndices, &edgeIndices, &triIndices, &propVertIndices, + numPropOut, &combined, policy](int i) { + auto &node = nodes[i]; + copy(policy, node->pImpl_->halfedgeTangent_.begin(), + node->pImpl_->halfedgeTangent_.end(), + combined.halfedgeTangent_.begin() + edgeIndices[i]); + transform( + policy, node->pImpl_->halfedge_.begin(), + node->pImpl_->halfedge_.end(), + combined.halfedge_.begin() + edgeIndices[i], + UpdateHalfedge({vertIndices[i], edgeIndices[i], triIndices[i]})); + + if (numPropOut > 0) { + auto start = + combined.meshRelation_.triProperties.begin() + triIndices[i]; + if (node->pImpl_->NumProp() > 0) { + auto &triProp = node->pImpl_->meshRelation_.triProperties; + transform(policy, triProp.begin(), triProp.end(), start, + UpdateTriProp({propVertIndices[i]})); + + const int numProp = node->pImpl_->NumProp(); + auto &oldProp = node->pImpl_->meshRelation_.properties; + auto &newProp = combined.meshRelation_.properties; + for (int p = 0; p < numProp; ++p) { + strided_range::IterC> oldRange(oldProp.begin() + p, + oldProp.end(), numProp); + strided_range::Iter> newRange( + newProp.begin() + numPropOut * propVertIndices[i] + p, + newProp.end(), numPropOut); + copy(policy, oldRange.begin(), oldRange.end(), newRange.begin()); + } + } else { + // point all triangles at single new property of zeros. 
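+            // [Editor's note] numPropVert reserved exactly one slot for such
+            // property-free nodes (the `numProp == 0 ? 1 : ...` term above),
+            // so every triangle of this node shares one all-zero property
+            // vertex at index propVertIndices[i].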
+ fill(policy, start, start + node->pImpl_->NumTri(), + glm::ivec3(propVertIndices[i])); + } + } + + if (node->transform_ == glm::mat4x3(1.0f)) { + copy(policy, node->pImpl_->vertPos_.begin(), + node->pImpl_->vertPos_.end(), + combined.vertPos_.begin() + vertIndices[i]); + copy(policy, node->pImpl_->faceNormal_.begin(), + node->pImpl_->faceNormal_.end(), + combined.faceNormal_.begin() + triIndices[i]); + } else { + // no need to apply the transform to the node, just copy the vertices + // and face normals and apply transform on the fly + auto vertPosBegin = thrust::make_transform_iterator( + node->pImpl_->vertPos_.begin(), Transform4x3({node->transform_})); + glm::mat3 normalTransform = + glm::inverse(glm::transpose(glm::mat3(node->transform_))); + auto faceNormalBegin = thrust::make_transform_iterator( + node->pImpl_->faceNormal_.begin(), + TransformNormals({normalTransform})); + copy_n(policy, vertPosBegin, node->pImpl_->vertPos_.size(), + combined.vertPos_.begin() + vertIndices[i]); + copy_n(policy, faceNormalBegin, node->pImpl_->faceNormal_.size(), + combined.faceNormal_.begin() + triIndices[i]); + + const bool invert = glm::determinant(glm::mat3(node->transform_)) < 0; + for_each_n(policy, + zip(combined.halfedgeTangent_.begin() + edgeIndices[i], + countAt(0)), + node->pImpl_->halfedgeTangent_.size(), + TransformTangents{glm::mat3(node->transform_), invert, + node->pImpl_->halfedgeTangent_, + node->pImpl_->halfedge_}); + if (invert) + for_each_n(policy, + zip(combined.meshRelation_.triRef.begin(), + countAt(triIndices[i])), + node->pImpl_->NumTri(), FlipTris({combined.halfedge_})); + } + // Since the nodes may be copies containing the same meshIDs, it is + // important to add an offset so that each node instance gets + // unique meshIDs. + const int offset = i * Manifold::Impl::meshIDCounter_; + transform(policy, node->pImpl_->meshRelation_.triRef.begin(), + node->pImpl_->meshRelation_.triRef.end(), + combined.meshRelation_.triRef.begin() + triIndices[i], + UpdateMeshIDs({offset})); + }); + + for (int i = 0; i < nodes.size(); i++) { + auto &node = nodes[i]; + const int offset = i * Manifold::Impl::meshIDCounter_; + + for (const auto pair : node->pImpl_->meshRelation_.meshIDtransform) { + combined.meshRelation_.meshIDtransform[pair.first + offset] = pair.second; + } + } + + // required to remove parts that are smaller than the precision + combined.SimplifyTopology(); + combined.Finish(); + combined.IncrementMeshIDs(); + return combined; +} + +CsgOpNode::CsgOpNode() {} + +CsgOpNode::CsgOpNode(const std::vector> &children, + OpType op) + : impl_(Impl{}) { + auto impl = impl_.GetGuard(); + impl->children_ = children; + SetOp(op); +} + +CsgOpNode::CsgOpNode(std::vector> &&children, + OpType op) + : impl_(Impl{}) { + auto impl = impl_.GetGuard(); + impl->children_ = children; + SetOp(op); +} + +std::shared_ptr CsgOpNode::Boolean( + const std::shared_ptr &second, OpType op) { + std::vector> children; + + auto isReused = [](const auto &node) { return node->impl_.UseCount() > 1; }; + + auto copyChildren = [&](const auto &list, const glm::mat4x3 &transform) { + for (const auto &child : list) { + children.push_back(child->Transform(transform)); + } + }; + + auto self = std::dynamic_pointer_cast(shared_from_this()); + assert(self); + if (IsOp(op) && !isReused(self)) { + auto impl = impl_.GetGuard(); + copyChildren(impl->children_, transform_); + } else { + children.push_back(self); + } + + auto secondOp = std::dynamic_pointer_cast(second); + auto canInlineSecondOp = [&]() { + switch (op) { + case 
OpType::Add: + case OpType::Intersect: + return secondOp->IsOp(op); + case OpType::Subtract: + return secondOp->IsOp(OpType::Add); + default: + return false; + } + }; + + if (secondOp && canInlineSecondOp() && !isReused(secondOp)) { + auto secondImpl = secondOp->impl_.GetGuard(); + copyChildren(secondImpl->children_, secondOp->transform_); + } else { + children.push_back(second); + } + + return std::make_shared(children, op); +} + +std::shared_ptr CsgOpNode::Transform(const glm::mat4x3 &m) const { + auto node = std::make_shared(); + node->impl_ = impl_; + node->transform_ = m * glm::mat4(transform_); + node->op_ = op_; + return node; +} + +std::shared_ptr CsgOpNode::ToLeafNode() const { + if (cache_ != nullptr) return cache_; + // turn the children into leaf nodes + GetChildren(); + auto impl = impl_.GetGuard(); + auto &children_ = impl->children_; + if (children_.size() > 1) { + switch (op_) { + case CsgNodeType::Union: + BatchUnion(); + break; + case CsgNodeType::Intersection: { + std::vector> impls; + for (auto &child : children_) { + impls.push_back( + std::dynamic_pointer_cast(child)->GetImpl()); + } + children_.clear(); + children_.push_back(std::make_shared( + BatchBoolean(OpType::Intersect, impls))); + break; + }; + case CsgNodeType::Difference: { + // take the lhs out and treat the remaining nodes as the rhs, perform + // union optimization for them + auto lhs = std::dynamic_pointer_cast(children_.front()); + children_.erase(children_.begin()); + BatchUnion(); + auto rhs = std::dynamic_pointer_cast(children_.front()); + children_.clear(); + Boolean3 boolean(*lhs->GetImpl(), *rhs->GetImpl(), OpType::Subtract); + children_.push_back( + std::make_shared(std::make_shared( + boolean.Result(OpType::Subtract)))); + }; + case CsgNodeType::Leaf: + // unreachable + break; + } + } else if (children_.size() == 0) { + return nullptr; + } + // children_ must contain only one CsgLeafNode now, and its Transform will + // give CsgLeafNode as well + cache_ = std::dynamic_pointer_cast( + children_.front()->Transform(transform_)); + return cache_; +} + +/** + * Efficient boolean operation on a set of nodes utilizing commutativity of the + * operation. Only supports union and intersection. 
+ */ +std::shared_ptr CsgOpNode::BatchBoolean( + OpType operation, + std::vector> &results) { + ZoneScoped; + auto getImplPtr = GetImplPtr(); + ASSERT(operation != OpType::Subtract, logicErr, + "BatchBoolean doesn't support Difference."); + // common cases + if (results.size() == 0) return std::make_shared(); + if (results.size() == 1) + return std::make_shared(*results.front()); + if (results.size() == 2) { + Boolean3 boolean(*results[0], *results[1], operation); + return std::make_shared(boolean.Result(operation)); + } +#if MANIFOLD_PAR == 'T' && __has_include() + if (!ManifoldParams().deterministic) { + tbb::task_group group; + tbb::concurrent_priority_queue queue( + results.size()); + for (auto result : results) { + queue.emplace(result); + } + results.clear(); + std::function process = [&]() { + while (queue.size() > 1) { + SharedImpl a, b; + if (!queue.try_pop(a)) continue; + if (!queue.try_pop(b)) { + queue.push(a); + continue; + } + group.run([&, a, b]() { + const Manifold::Impl *aImpl; + const Manifold::Impl *bImpl; + Boolean3 boolean(*getImplPtr(a), *getImplPtr(b), operation); + queue.emplace( + std::make_shared(boolean.Result(operation))); + return group.run(process); + }); + } + }; + group.run_and_wait(process); + SharedImpl r; + queue.try_pop(r); + return *std::get_if>(&r); + } +#endif + // apply boolean operations starting from smaller meshes + // the assumption is that boolean operations on smaller meshes is faster, + // due to less data being copied and processed + auto cmpFn = MeshCompare(); + std::make_heap(results.begin(), results.end(), cmpFn); + while (results.size() > 1) { + std::pop_heap(results.begin(), results.end(), cmpFn); + auto a = std::move(results.back()); + results.pop_back(); + std::pop_heap(results.begin(), results.end(), cmpFn); + auto b = std::move(results.back()); + results.pop_back(); + // boolean operation + Boolean3 boolean(*a, *b, operation); + if (results.size() == 0) { + return std::make_shared(boolean.Result(operation)); + } + results.push_back( + std::make_shared(boolean.Result(operation))); + std::push_heap(results.begin(), results.end(), cmpFn); + } + return std::make_shared(*results.front()); +} + +/** + * Efficient union operation on a set of nodes by doing Compose as much as + * possible. + * Note: Due to some unknown issues with `Compose`, we are now doing + * `BatchBoolean` instead of using `Compose` for non-intersecting manifolds. + */ +void CsgOpNode::BatchUnion() const { + ZoneScoped; + // INVARIANT: children_ is a vector of leaf nodes + // this kMaxUnionSize is a heuristic to avoid the pairwise disjoint check + // with O(n^2) complexity to take too long. + // If the number of children exceeded this limit, we will operate on chunks + // with size kMaxUnionSize. + constexpr int kMaxUnionSize = 1000; + auto impl = impl_.GetGuard(); + auto &children_ = impl->children_; + while (children_.size() > 1) { + const int start = (children_.size() > kMaxUnionSize) + ? 
(children_.size() - kMaxUnionSize) + : 0; + Vec boxes; + boxes.reserve(children_.size() - start); + for (int i = start; i < children_.size(); i++) { + boxes.push_back(std::dynamic_pointer_cast(children_[i]) + ->GetImpl() + ->bBox_); + } + // partition the children into a set of disjoint sets + // each set contains a set of children that are pairwise disjoint + std::vector> disjointSets; + for (size_t i = 0; i < boxes.size(); i++) { + auto lambda = [&boxes, i](const Vec &set) { + return std::find_if(set.begin(), set.end(), CheckOverlap({boxes, i})) == + set.end(); + }; + auto it = std::find_if(disjointSets.begin(), disjointSets.end(), lambda); + if (it == disjointSets.end()) { + disjointSets.push_back(std::vector{i}); + } else { + it->push_back(i); + } + } + // compose each set of disjoint children + std::vector> impls; + for (auto &set : disjointSets) { + if (set.size() == 1) { + impls.push_back( + std::dynamic_pointer_cast(children_[start + set[0]]) + ->GetImpl()); + } else { + std::vector> tmp; + for (size_t j : set) { + tmp.push_back( + std::dynamic_pointer_cast(children_[start + j])); + } + impls.push_back( + std::make_shared(CsgLeafNode::Compose(tmp))); + } + } + + children_.erase(children_.begin() + start, children_.end()); + children_.push_back( + std::make_shared(BatchBoolean(OpType::Add, impls))); + // move it to the front as we process from the back, and the newly added + // child should be quite complicated + std::swap(children_.front(), children_.back()); + } +} + +/** + * Flatten the children to a list of leaf nodes and return them. + * If forceToLeafNodes is true, the list will be guaranteed to be a list of leaf + * nodes (i.e. no ops). Otherwise, the list may contain ops. Note that this + * function will not apply the transform to children, as they may be shared with + * other nodes. + */ +std::vector> &CsgOpNode::GetChildren( + bool forceToLeafNodes) const { + auto impl = impl_.GetGuard(); + + if (forceToLeafNodes && !impl->forcedToLeafNodes_) { + impl->forcedToLeafNodes_ = true; + for_each(impl->children_.size() > 1 && !ManifoldParams().deterministic + ? ExecutionPolicy::Par + : ExecutionPolicy::Seq, + impl->children_.begin(), impl->children_.end(), [](auto &child) { + if (child->GetNodeType() != CsgNodeType::Leaf) { + child = child->ToLeafNode(); + } + }); + } + return impl->children_; +} + +void CsgOpNode::SetOp(OpType op) { + switch (op) { + case OpType::Add: + op_ = CsgNodeType::Union; + break; + case OpType::Subtract: + op_ = CsgNodeType::Difference; + break; + case OpType::Intersect: + op_ = CsgNodeType::Intersection; + break; + } +} + +bool CsgOpNode::IsOp(OpType op) { + switch (op) { + case OpType::Add: + return op_ == CsgNodeType::Union; + case OpType::Subtract: + return op_ == CsgNodeType::Difference; + case OpType::Intersect: + return op_ == CsgNodeType::Intersection; + default: + return false; + } +} + +glm::mat4x3 CsgOpNode::GetTransform() const { return transform_; } + +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/csg_tree.h b/thirdparty/manifold/src/manifold/src/csg_tree.h new file mode 100644 index 000000000000..5acbd25fa297 --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/csg_tree.h @@ -0,0 +1,109 @@ +// Copyright 2022 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "manifold.h"
+#include "utils.h"
+
+namespace manifold {
+
+enum class CsgNodeType { Union, Intersection, Difference, Leaf };
+
+class CsgLeafNode;
+
+class CsgNode : public std::enable_shared_from_this<CsgNode> {
+ public:
+  virtual std::shared_ptr<CsgLeafNode> ToLeafNode() const = 0;
+  virtual std::shared_ptr<CsgNode> Transform(const glm::mat4x3 &m) const = 0;
+  virtual CsgNodeType GetNodeType() const = 0;
+  virtual glm::mat4x3 GetTransform() const = 0;
+
+  virtual std::shared_ptr<CsgNode> Boolean(
+      const std::shared_ptr<CsgNode> &second, OpType op);
+
+  std::shared_ptr<CsgNode> Translate(const glm::vec3 &t) const;
+  std::shared_ptr<CsgNode> Scale(const glm::vec3 &s) const;
+  std::shared_ptr<CsgNode> Rotate(float xDegrees = 0, float yDegrees = 0,
+                                  float zDegrees = 0) const;
+};
+
+class CsgLeafNode final : public CsgNode {
+ public:
+  CsgLeafNode();
+  CsgLeafNode(std::shared_ptr<const Manifold::Impl> pImpl_);
+  CsgLeafNode(std::shared_ptr<const Manifold::Impl> pImpl_,
+              glm::mat4x3 transform_);
+
+  std::shared_ptr<const Manifold::Impl> GetImpl() const;
+
+  std::shared_ptr<CsgLeafNode> ToLeafNode() const override;
+
+  std::shared_ptr<CsgNode> Transform(const glm::mat4x3 &m) const override;
+
+  CsgNodeType GetNodeType() const override;
+
+  glm::mat4x3 GetTransform() const override;
+
+  static Manifold::Impl Compose(
+      const std::vector<std::shared_ptr<CsgLeafNode>> &nodes);
+
+ private:
+  mutable std::shared_ptr<const Manifold::Impl> pImpl_;
+  mutable glm::mat4x3 transform_ = glm::mat4x3(1.0f);
+};
+
+class CsgOpNode final : public CsgNode {
+ public:
+  CsgOpNode();
+
+  CsgOpNode(const std::vector<std::shared_ptr<CsgNode>> &children, OpType op);
+
+  CsgOpNode(std::vector<std::shared_ptr<CsgNode>> &&children, OpType op);
+
+  std::shared_ptr<CsgNode> Boolean(const std::shared_ptr<CsgNode> &second,
+                                   OpType op) override;
+
+  std::shared_ptr<CsgNode> Transform(const glm::mat4x3 &m) const override;
+
+  std::shared_ptr<CsgLeafNode> ToLeafNode() const override;
+
+  CsgNodeType GetNodeType() const override { return op_; }
+
+  glm::mat4x3 GetTransform() const override;
+
+ private:
+  struct Impl {
+    std::vector<std::shared_ptr<CsgNode>> children_;
+    bool forcedToLeafNodes_ = false;
+  };
+  mutable ConcurrentSharedPtr<Impl> impl_ = ConcurrentSharedPtr<Impl>(Impl{});
+  CsgNodeType op_;
+  glm::mat4x3 transform_ = glm::mat4x3(1.0f);
+  // the following fields are for lazy evaluation, so they are mutable
+  mutable std::shared_ptr<CsgLeafNode> cache_ = nullptr;
+
+  void SetOp(OpType);
+  bool IsOp(OpType op);
+
+  static std::shared_ptr<Manifold::Impl> BatchBoolean(
+      OpType operation,
+      std::vector<std::shared_ptr<const Manifold::Impl>> &results);
+
+  void BatchUnion() const;
+
+  std::vector<std::shared_ptr<CsgNode>> &GetChildren(
+      bool forceToLeafNodes = true) const;
+};
+
+} // namespace manifold
diff --git a/thirdparty/manifold/src/manifold/src/edge_op.cpp b/thirdparty/manifold/src/manifold/src/edge_op.cpp
new file mode 100644
index 000000000000..51460b911451
--- /dev/null
+++ b/thirdparty/manifold/src/manifold/src/edge_op.cpp
@@ -0,0 +1,678 @@
+// Copyright 2021 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "impl.h"
+#include "par.h"
+
+namespace {
+using namespace manifold;
+
+glm::ivec3 TriOf(int edge) {
+  glm::ivec3 triEdge;
+  triEdge[0] = edge;
+  triEdge[1] = NextHalfedge(triEdge[0]);
+  triEdge[2] = NextHalfedge(triEdge[1]);
+  return triEdge;
+}
+
+bool Is01Longest(glm::vec2 v0, glm::vec2 v1, glm::vec2 v2) {
+  const glm::vec2 e[3] = {v1 - v0, v2 - v1, v0 - v2};
+  float l[3];
+  for (int i : {0, 1, 2}) l[i] = glm::dot(e[i], e[i]);
+  return l[0] > l[1] && l[0] > l[2];
+}
+
+struct DuplicateEdge {
+  const Halfedge* sortedHalfedge;
+
+  bool operator()(int edge) {
+    const Halfedge& halfedge = sortedHalfedge[edge];
+    const Halfedge& nextHalfedge = sortedHalfedge[edge + 1];
+    return halfedge.startVert == nextHalfedge.startVert &&
+           halfedge.endVert == nextHalfedge.endVert;
+  }
+};
+
+struct ShortEdge {
+  VecView<const Halfedge> halfedge;
+  VecView<const glm::vec3> vertPos;
+  const float precision;
+
+  bool operator()(int edge) const {
+    if (halfedge[edge].pairedHalfedge < 0) return false;
+    // Flag short edges
+    const glm::vec3 delta =
+        vertPos[halfedge[edge].endVert] - vertPos[halfedge[edge].startVert];
+    return glm::dot(delta, delta) < precision * precision;
+  }
+};
+
+struct FlagEdge {
+  VecView<const Halfedge> halfedge;
+  VecView<const TriRef> triRef;
+
+  bool operator()(int edge) const {
+    if (halfedge[edge].pairedHalfedge < 0) return false;
+    // Flag redundant edges - those where the startVert is surrounded by only
+    // two original triangles.
+    const TriRef ref0 = triRef[edge / 3];
+    int current = NextHalfedge(halfedge[edge].pairedHalfedge);
+    const TriRef ref1 = triRef[current / 3];
+    while (current != edge) {
+      current = NextHalfedge(halfedge[current].pairedHalfedge);
+      int tri = current / 3;
+      const TriRef ref = triRef[tri];
+      if (!ref.SameFace(ref0) && !ref.SameFace(ref1)) return false;
+    }
+    return true;
+  }
+};
+
+struct SwappableEdge {
+  VecView<const Halfedge> halfedge;
+  VecView<const glm::vec3> vertPos;
+  VecView<const glm::vec3> triNormal;
+  const float precision;
+
+  bool operator()(int edge) const {
+    if (halfedge[edge].pairedHalfedge < 0) return false;
+
+    int tri = halfedge[edge].face;
+    glm::ivec3 triedge = TriOf(edge);
+    glm::mat3x2 projection = GetAxisAlignedProjection(triNormal[tri]);
+    glm::vec2 v[3];
+    for (int i : {0, 1, 2})
+      v[i] = projection * vertPos[halfedge[triedge[i]].startVert];
+    if (CCW(v[0], v[1], v[2], precision) > 0 || !Is01Longest(v[0], v[1], v[2]))
+      return false;
+
+    // Switch to neighbor's projection.
+    edge = halfedge[edge].pairedHalfedge;
+    tri = halfedge[edge].face;
+    triedge = TriOf(edge);
+    projection = GetAxisAlignedProjection(triNormal[tri]);
+    for (int i : {0, 1, 2})
+      v[i] = projection * vertPos[halfedge[triedge[i]].startVert];
+    return CCW(v[0], v[1], v[2], precision) > 0 ||
+           Is01Longest(v[0], v[1], v[2]);
+  }
+};
+
+struct SortEntry {
+  int start;
+  int end;
+  int index;
+  inline bool operator<(const SortEntry& other) const {
+    return start == other.start ? end < other.end : start < other.start;
+  }
+};
+} // namespace
+
+namespace manifold {
+
+/**
+ * Collapses degenerate triangles by removing edges shorter than precision_ and
+ * any edge that is preceded by an edge that joins the same two face relations.
+ * It also performs edge swaps on the long edges of degenerate triangles, though + * there are some configurations of degenerates that cannot be removed this way. + * + * Before collapsing edges, the mesh is checked for duplicate edges (more than + * one pair of triangles sharing the same edge), which are removed by + * duplicating one vert and adding two triangles. These degenerate triangles are + * likely to be collapsed again in the subsequent simplification. + * + * Note when an edge collapse would result in something non-manifold, the + * vertices are duplicated in such a way as to remove handles or separate + * meshes, thus decreasing the Genus(). It only increases when meshes that have + * collapsed to just a pair of triangles are removed entirely. + * + * Rather than actually removing the edges, this step merely marks them for + * removal, by setting vertPos to NaN and halfedge to {-1, -1, -1, -1}. + */ +void Manifold::Impl::SimplifyTopology() { + if (!halfedge_.size()) return; + + const size_t nbEdges = halfedge_.size(); + auto policy = autoPolicy(nbEdges); + size_t numFlagged = 0; + Vec bflags(nbEdges); + + // In the case of a very bad triangulation, it is possible to create pinched + // verts. They must be removed before edge collapse. + SplitPinchedVerts(); + + { + ZoneScopedN("DedupeEdge"); + Vec entries; + entries.reserve(nbEdges / 2); + for (int i = 0; i < nbEdges; ++i) { + if (halfedge_[i].IsForward()) { + entries.push_back({halfedge_[i].startVert, halfedge_[i].endVert, i}); + } + } + + stable_sort(policy, entries.begin(), entries.end()); + for (int i = 0; i < entries.size() - 1; ++i) { + if (entries[i].start == entries[i + 1].start && + entries[i].end == entries[i + 1].end) { + DedupeEdge(entries[i].index); + numFlagged++; + } + } + } + +#ifdef MANIFOLD_DEBUG + if (ManifoldParams().verbose && numFlagged > 0) { + std::cout << "found " << numFlagged << " duplicate edges to split" + << std::endl; + } +#endif + + if (!ManifoldParams().cleanupTriangles) { + return; + } + + std::vector scratchBuffer; + scratchBuffer.reserve(10); + { + ZoneScopedN("CollapseShortEdge"); + numFlagged = 0; + ShortEdge se{halfedge_, vertPos_, precision_}; + for_each_n(policy, countAt(0_z), nbEdges, + [&](size_t i) { bflags[i] = se(i); }); + for (size_t i = 0; i < nbEdges; ++i) { + if (bflags[i]) { + CollapseEdge(i, scratchBuffer); + scratchBuffer.resize(0); + numFlagged++; + } + } + } + +#ifdef MANIFOLD_DEBUG + if (ManifoldParams().verbose && numFlagged > 0) { + std::cout << "found " << numFlagged << " short edges to collapse" + << std::endl; + } +#endif + + { + ZoneScopedN("CollapseFlaggedEdge"); + numFlagged = 0; + FlagEdge se{halfedge_, meshRelation_.triRef}; + for_each_n(policy, countAt(0_z), nbEdges, + [&](size_t i) { bflags[i] = se(i); }); + for (size_t i = 0; i < nbEdges; ++i) { + if (bflags[i]) { + CollapseEdge(i, scratchBuffer); + scratchBuffer.resize(0); + numFlagged++; + } + } + } + +#ifdef MANIFOLD_DEBUG + if (ManifoldParams().verbose && numFlagged > 0) { + std::cout << "found " << numFlagged << " colinear edges to collapse" + << std::endl; + } +#endif + + { + ZoneScopedN("RecursiveEdgeSwap"); + numFlagged = 0; + SwappableEdge se{halfedge_, vertPos_, faceNormal_, precision_}; + for_each_n(policy, countAt(0_z), nbEdges, + [&](size_t i) { bflags[i] = se(i); }); + std::vector edgeSwapStack; + std::vector visited(halfedge_.size(), -1); + int tag = 0; + for (size_t i = 0; i < nbEdges; ++i) { + if (bflags[i]) { + numFlagged++; + tag++; + RecursiveEdgeSwap(i, tag, visited, edgeSwapStack, 
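[Each cleanup pass above has the same two-phase shape: evaluate a side-effect-free predicate over all edges, which is safe to run in parallel, then apply the mutations sequentially, because one collapse can invalidate its neighbors' flags. A stripped-down sketch of that pattern with a placeholder predicate; none of these names are the library's.]

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<float> edgeLength = {0.5f, 1e-9f, 2.0f, 1e-8f};
  const float precision = 1e-6f;

  // Phase 1: pure, read-only predicate; parallelizable.
  std::vector<char> flagged(edgeLength.size());
  std::transform(edgeLength.begin(), edgeLength.end(), flagged.begin(),
                 [&](float l) { return char(l < precision); });

  // Phase 2: sequential mutation; each "collapse" may change its
  // neighborhood, so the flags are treated as hints, not guarantees.
  for (size_t i = 0; i < flagged.size(); ++i)
    if (flagged[i]) std::printf("collapse edge %zu\n", i);
}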
scratchBuffer); + while (!edgeSwapStack.empty()) { + int last = edgeSwapStack.back(); + edgeSwapStack.pop_back(); + RecursiveEdgeSwap(last, tag, visited, edgeSwapStack, scratchBuffer); + } + } + } + } + +#ifdef MANIFOLD_DEBUG + if (ManifoldParams().verbose && numFlagged > 0) { + std::cout << "found " << numFlagged << " edges to swap" << std::endl; + } +#endif +} + +// Deduplicate the given 4-manifold edge by duplicating endVert, thus making the +// edges distinct. Also duplicates startVert if it becomes pinched. +void Manifold::Impl::DedupeEdge(const int edge) { + // Orbit endVert + const int startVert = halfedge_[edge].startVert; + const int endVert = halfedge_[edge].endVert; + int current = halfedge_[NextHalfedge(edge)].pairedHalfedge; + while (current != edge) { + const int vert = halfedge_[current].startVert; + if (vert == startVert) { + // Single topological unit needs 2 faces added to be split + const int newVert = vertPos_.size(); + vertPos_.push_back(vertPos_[endVert]); + if (vertNormal_.size() > 0) vertNormal_.push_back(vertNormal_[endVert]); + current = halfedge_[NextHalfedge(current)].pairedHalfedge; + const int opposite = halfedge_[NextHalfedge(edge)].pairedHalfedge; + + UpdateVert(newVert, current, opposite); + + int newHalfedge = halfedge_.size(); + int newFace = newHalfedge / 3; + int oldFace = halfedge_[current].face; + int outsideVert = halfedge_[current].startVert; + halfedge_.push_back({endVert, newVert, -1, newFace}); + halfedge_.push_back({newVert, outsideVert, -1, newFace}); + halfedge_.push_back({outsideVert, endVert, -1, newFace}); + PairUp(newHalfedge + 2, halfedge_[current].pairedHalfedge); + PairUp(newHalfedge + 1, current); + if (meshRelation_.triRef.size() > 0) + meshRelation_.triRef.push_back(meshRelation_.triRef[oldFace]); + if (meshRelation_.triProperties.size() > 0) + meshRelation_.triProperties.push_back( + meshRelation_.triProperties[oldFace]); + if (faceNormal_.size() > 0) faceNormal_.push_back(faceNormal_[oldFace]); + + newHalfedge += 3; + ++newFace; + oldFace = halfedge_[opposite].face; + outsideVert = halfedge_[opposite].startVert; + halfedge_.push_back({newVert, endVert, -1, newFace}); + halfedge_.push_back({endVert, outsideVert, -1, newFace}); + halfedge_.push_back({outsideVert, newVert, -1, newFace}); + PairUp(newHalfedge + 2, halfedge_[opposite].pairedHalfedge); + PairUp(newHalfedge + 1, opposite); + PairUp(newHalfedge, newHalfedge - 3); + if (meshRelation_.triRef.size() > 0) + meshRelation_.triRef.push_back(meshRelation_.triRef[oldFace]); + if (meshRelation_.triProperties.size() > 0) + meshRelation_.triProperties.push_back( + meshRelation_.triProperties[oldFace]); + if (faceNormal_.size() > 0) faceNormal_.push_back(faceNormal_[oldFace]); + + break; + } + + current = halfedge_[NextHalfedge(current)].pairedHalfedge; + } + + if (current == edge) { + // Separate topological unit needs no new faces to be split + const int newVert = vertPos_.size(); + vertPos_.push_back(vertPos_[endVert]); + if (vertNormal_.size() > 0) vertNormal_.push_back(vertNormal_[endVert]); + + ForVert(NextHalfedge(current), [this, newVert](int e) { + halfedge_[e].startVert = newVert; + halfedge_[halfedge_[e].pairedHalfedge].endVert = newVert; + }); + } + + // Orbit startVert + const int pair = halfedge_[edge].pairedHalfedge; + current = halfedge_[NextHalfedge(pair)].pairedHalfedge; + while (current != pair) { + const int vert = halfedge_[current].startVert; + if (vert == endVert) { + break; // Connected: not a pinched vert + } + current = 
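[The `while (current != ...)` walks in DedupeEdge and the ForVert calls are vertex orbits: hop across the paired halfedge, then advance to the next edge of that triangle, which leaves the same vertex again. A runnable toy version on a tetrahedron, using a hypothetical `HE` record rather than the library's `Halfedge`.]

#include <cstdio>
#include <vector>

struct HE { int startVert, endVert, paired; };
int Next(int e) { return e % 3 == 2 ? e - 2 : e + 1; }

int main() {
  // A tetrahedron: faces (0,1,2) (0,2,3) (0,3,1) (1,3,2), three halfedges
  // per face, with paired linking the two directions of each edge.
  std::vector<HE> he = {
      {0, 1, 8}, {1, 2, 11}, {2, 0, 3},   // face 0
      {0, 2, 2}, {2, 3, 10}, {3, 0, 6},   // face 1
      {0, 3, 5}, {3, 1, 9},  {1, 0, 0},   // face 2
      {1, 3, 7}, {3, 2, 4},  {2, 1, 1}};  // face 3
  // Orbit vertex 0 starting from halfedge 0: pair, then next, keeps the
  // same start vertex and visits every outgoing halfedge exactly once.
  int start = 0, current = start;
  do {
    current = Next(he[current].paired);
    std::printf("halfedge %d: %d -> %d\n", current, he[current].startVert,
                he[current].endVert);
  } while (current != start);
}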
halfedge_[NextHalfedge(current)].pairedHalfedge; + } + + if (current == pair) { + // Split the pinched vert the previous split created. + const int newVert = vertPos_.size(); + vertPos_.push_back(vertPos_[endVert]); + if (vertNormal_.size() > 0) vertNormal_.push_back(vertNormal_[endVert]); + + ForVert(NextHalfedge(current), [this, newVert](int e) { + halfedge_[e].startVert = newVert; + halfedge_[halfedge_[e].pairedHalfedge].endVert = newVert; + }); + } +} + +void Manifold::Impl::PairUp(int edge0, int edge1) { + halfedge_[edge0].pairedHalfedge = edge1; + halfedge_[edge1].pairedHalfedge = edge0; +} + +// Traverses CW around startEdge.endVert from startEdge to endEdge +// (edgeEdge.endVert must == startEdge.endVert), updating each edge to point +// to vert instead. +void Manifold::Impl::UpdateVert(int vert, int startEdge, int endEdge) { + int current = startEdge; + while (current != endEdge) { + halfedge_[current].endVert = vert; + current = NextHalfedge(current); + halfedge_[current].startVert = vert; + current = halfedge_[current].pairedHalfedge; + ASSERT(current != startEdge, logicErr, "infinite loop in decimator!"); + } +} + +// In the event that the edge collapse would create a non-manifold edge, +// instead we duplicate the two verts and attach the manifolds the other way +// across this edge. +void Manifold::Impl::FormLoop(int current, int end) { + int startVert = vertPos_.size(); + vertPos_.push_back(vertPos_[halfedge_[current].startVert]); + int endVert = vertPos_.size(); + vertPos_.push_back(vertPos_[halfedge_[current].endVert]); + + int oldMatch = halfedge_[current].pairedHalfedge; + int newMatch = halfedge_[end].pairedHalfedge; + + UpdateVert(startVert, oldMatch, newMatch); + UpdateVert(endVert, end, current); + + halfedge_[current].pairedHalfedge = newMatch; + halfedge_[newMatch].pairedHalfedge = current; + halfedge_[end].pairedHalfedge = oldMatch; + halfedge_[oldMatch].pairedHalfedge = end; + + RemoveIfFolded(end); +} + +void Manifold::Impl::CollapseTri(const glm::ivec3& triEdge) { + if (halfedge_[triEdge[1]].pairedHalfedge == -1) return; + int pair1 = halfedge_[triEdge[1]].pairedHalfedge; + int pair2 = halfedge_[triEdge[2]].pairedHalfedge; + halfedge_[pair1].pairedHalfedge = pair2; + halfedge_[pair2].pairedHalfedge = pair1; + for (int i : {0, 1, 2}) { + halfedge_[triEdge[i]] = {-1, -1, -1, -1}; + } +} + +void Manifold::Impl::RemoveIfFolded(int edge) { + const glm::ivec3 tri0edge = TriOf(edge); + const glm::ivec3 tri1edge = TriOf(halfedge_[edge].pairedHalfedge); + if (halfedge_[tri0edge[1]].pairedHalfedge == -1) return; + if (halfedge_[tri0edge[1]].endVert == halfedge_[tri1edge[1]].endVert) { + if (halfedge_[tri0edge[1]].pairedHalfedge == tri1edge[2]) { + if (halfedge_[tri0edge[2]].pairedHalfedge == tri1edge[1]) { + for (int i : {0, 1, 2}) + vertPos_[halfedge_[tri0edge[i]].startVert] = glm::vec3(NAN); + } else { + vertPos_[halfedge_[tri0edge[1]].startVert] = glm::vec3(NAN); + } + } else { + if (halfedge_[tri0edge[2]].pairedHalfedge == tri1edge[1]) { + vertPos_[halfedge_[tri1edge[1]].startVert] = glm::vec3(NAN); + } + } + PairUp(halfedge_[tri0edge[1]].pairedHalfedge, + halfedge_[tri1edge[2]].pairedHalfedge); + PairUp(halfedge_[tri0edge[2]].pairedHalfedge, + halfedge_[tri1edge[1]].pairedHalfedge); + for (int i : {0, 1, 2}) { + halfedge_[tri0edge[i]] = {-1, -1, -1, -1}; + halfedge_[tri1edge[i]] = {-1, -1, -1, -1}; + } + } +} + +// Collapses the given edge by removing startVert. May split the mesh +// topologically if the collapse would have resulted in a 4-manifold edge. 
Do +// not collapse an edge if startVert is pinched - the vert will be marked NaN, +// but other edges may still be pointing to it. +void Manifold::Impl::CollapseEdge(const int edge, std::vector& edges) { + Vec& triRef = meshRelation_.triRef; + Vec& triProp = meshRelation_.triProperties; + + const Halfedge toRemove = halfedge_[edge]; + if (toRemove.pairedHalfedge < 0) return; + + const int endVert = toRemove.endVert; + const glm::ivec3 tri0edge = TriOf(edge); + const glm::ivec3 tri1edge = TriOf(toRemove.pairedHalfedge); + + const glm::vec3 pNew = vertPos_[endVert]; + const glm::vec3 pOld = vertPos_[toRemove.startVert]; + const glm::vec3 delta = pNew - pOld; + const bool shortEdge = glm::dot(delta, delta) < precision_ * precision_; + + // Orbit endVert + int current = halfedge_[tri0edge[1]].pairedHalfedge; + while (current != tri1edge[2]) { + current = NextHalfedge(current); + edges.push_back(current); + current = halfedge_[current].pairedHalfedge; + } + + // Orbit startVert + int start = halfedge_[tri1edge[1]].pairedHalfedge; + if (!shortEdge) { + current = start; + TriRef refCheck = triRef[toRemove.pairedHalfedge / 3]; + glm::vec3 pLast = vertPos_[halfedge_[tri1edge[1]].endVert]; + while (current != tri0edge[2]) { + current = NextHalfedge(current); + glm::vec3 pNext = vertPos_[halfedge_[current].endVert]; + const int tri = current / 3; + const TriRef ref = triRef[tri]; + const glm::mat3x2 projection = GetAxisAlignedProjection(faceNormal_[tri]); + // Don't collapse if the edge is not redundant (this may have changed due + // to the collapse of neighbors). + if (!ref.SameFace(refCheck)) { + refCheck = triRef[edge / 3]; + if (!ref.SameFace(refCheck)) { + return; + } else { + // Don't collapse if the edges separating the faces are not colinear + // (can happen when the two faces are coplanar). + if (CCW(projection * pOld, projection * pLast, projection * pNew, + precision_) != 0) + return; + } + } + + // Don't collapse edge if it would cause a triangle to invert. + if (CCW(projection * pNext, projection * pLast, projection * pNew, + precision_) < 0) + return; + + pLast = pNext; + current = halfedge_[current].pairedHalfedge; + } + } + + // Remove toRemove.startVert and replace with endVert. 
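[The CCW guards above reject a collapse that would flip the orientation of a surviving triangle. A toy version of such an orientation test with an explicit tolerance band; a simplified stand-in for the library's CCW(p0, p1, p2, precision), not its actual implementation.]

#include <cstdio>

// Sign of the cross product of (p1-p0) and (p2-p0), i.e. twice the signed
// triangle area, snapped to zero inside a tolerance band.
int CcwSketch(float p0x, float p0y, float p1x, float p1y, float p2x,
              float p2y, float tol) {
  float area2 = (p1x - p0x) * (p2y - p0y) - (p2x - p0x) * (p1y - p0y);
  if (area2 > tol) return 1;    // counter-clockwise
  if (area2 < -tol) return -1;  // clockwise: the triangle would invert
  return 0;                     // degenerate within tolerance
}

int main() {
  // Moving a vertex across the opposite edge flips the sign.
  std::printf("%d\n", CcwSketch(0, 0, 1, 0, 0, 1, 1e-6f));   // prints  1
  std::printf("%d\n", CcwSketch(0, 0, 1, 0, 0, -1, 1e-6f));  // prints -1
}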
+ vertPos_[toRemove.startVert] = glm::vec3(NAN); + CollapseTri(tri1edge); + + // Orbit startVert + const int tri0 = edge / 3; + const int tri1 = toRemove.pairedHalfedge / 3; + const int triVert0 = (edge + 1) % 3; + const int triVert1 = toRemove.pairedHalfedge % 3; + current = start; + while (current != tri0edge[2]) { + current = NextHalfedge(current); + + if (triProp.size() > 0) { + // Update the shifted triangles to the vertBary of endVert + const int tri = current / 3; + const int vIdx = current - 3 * tri; + if (triRef[tri].SameFace(triRef[tri0])) { + triProp[tri][vIdx] = triProp[tri0][triVert0]; + } else if (triRef[tri].SameFace(triRef[tri1])) { + triProp[tri][vIdx] = triProp[tri1][triVert1]; + } + } + + const int vert = halfedge_[current].endVert; + const int next = halfedge_[current].pairedHalfedge; + for (int i = 0; i < edges.size(); ++i) { + if (vert == halfedge_[edges[i]].endVert) { + FormLoop(edges[i], current); + start = next; + edges.resize(i); + break; + } + } + current = next; + } + + UpdateVert(endVert, start, tri0edge[2]); + CollapseTri(tri0edge); + RemoveIfFolded(start); +} + +void Manifold::Impl::RecursiveEdgeSwap(const int edge, int& tag, + std::vector& visited, + std::vector& edgeSwapStack, + std::vector& edges) { + Vec& triRef = meshRelation_.triRef; + + if (edge < 0) return; + const int pair = halfedge_[edge].pairedHalfedge; + if (pair < 0) return; + + // avoid infinite recursion + if (visited[edge] == tag && visited[pair] == tag) return; + + const glm::ivec3 tri0edge = TriOf(edge); + const glm::ivec3 tri1edge = TriOf(pair); + const glm::ivec3 perm0 = TriOf(edge % 3); + const glm::ivec3 perm1 = TriOf(pair % 3); + + glm::mat3x2 projection = GetAxisAlignedProjection(faceNormal_[edge / 3]); + glm::vec2 v[4]; + for (int i : {0, 1, 2}) + v[i] = projection * vertPos_[halfedge_[tri0edge[i]].startVert]; + // Only operate on the long edge of a degenerate triangle. + if (CCW(v[0], v[1], v[2], precision_) > 0 || !Is01Longest(v[0], v[1], v[2])) + return; + + // Switch to neighbor's projection. + projection = GetAxisAlignedProjection(faceNormal_[halfedge_[pair].face]); + for (int i : {0, 1, 2}) + v[i] = projection * vertPos_[halfedge_[tri0edge[i]].startVert]; + v[3] = projection * vertPos_[halfedge_[tri1edge[2]].startVert]; + + auto SwapEdge = [&]() { + // The 0-verts are swapped to the opposite 2-verts. + const int v0 = halfedge_[tri0edge[2]].startVert; + const int v1 = halfedge_[tri1edge[2]].startVert; + halfedge_[tri0edge[0]].startVert = v1; + halfedge_[tri0edge[2]].endVert = v1; + halfedge_[tri1edge[0]].startVert = v0; + halfedge_[tri1edge[2]].endVert = v0; + PairUp(tri0edge[0], halfedge_[tri1edge[2]].pairedHalfedge); + PairUp(tri1edge[0], halfedge_[tri0edge[2]].pairedHalfedge); + PairUp(tri0edge[2], tri1edge[2]); + // Both triangles are now subsets of the neighboring triangle. 
+ const int tri0 = halfedge_[tri0edge[0]].face; + const int tri1 = halfedge_[tri1edge[0]].face; + faceNormal_[tri0] = faceNormal_[tri1]; + triRef[tri0] = triRef[tri1]; + const float l01 = glm::length(v[1] - v[0]); + const float l02 = glm::length(v[2] - v[0]); + const float a = glm::max(0.0f, glm::min(1.0f, l02 / l01)); + // Update properties if applicable + if (meshRelation_.properties.size() > 0) { + Vec& triProp = meshRelation_.triProperties; + Vec& prop = meshRelation_.properties; + triProp[tri0] = triProp[tri1]; + triProp[tri0][perm0[1]] = triProp[tri1][perm1[0]]; + triProp[tri0][perm0[0]] = triProp[tri1][perm1[2]]; + const int numProp = NumProp(); + const int newProp = prop.size() / numProp; + const int propIdx0 = triProp[tri1][perm1[0]]; + const int propIdx1 = triProp[tri1][perm1[1]]; + for (int p = 0; p < numProp; ++p) { + prop.push_back(a * prop[numProp * propIdx0 + p] + + (1 - a) * prop[numProp * propIdx1 + p]); + } + triProp[tri1][perm1[0]] = newProp; + triProp[tri0][perm0[2]] = newProp; + } + + // if the new edge already exists, duplicate the verts and split the mesh. + int current = halfedge_[tri1edge[0]].pairedHalfedge; + const int endVert = halfedge_[tri1edge[1]].endVert; + while (current != tri0edge[1]) { + current = NextHalfedge(current); + if (halfedge_[current].endVert == endVert) { + FormLoop(tri0edge[2], current); + RemoveIfFolded(tri0edge[2]); + return; + } + current = halfedge_[current].pairedHalfedge; + } + }; + + // Only operate if the other triangles are not degenerate. + if (CCW(v[1], v[0], v[3], precision_) <= 0) { + if (!Is01Longest(v[1], v[0], v[3])) return; + // Two facing, long-edge degenerates can swap. + SwapEdge(); + const glm::vec2 e23 = v[3] - v[2]; + if (glm::dot(e23, e23) < precision_ * precision_) { + tag++; + CollapseEdge(tri0edge[2], edges); + edges.resize(0); + } else { + visited[edge] = tag; + visited[pair] = tag; + edgeSwapStack.insert(edgeSwapStack.end(), {tri1edge[1], tri1edge[0], + tri0edge[1], tri0edge[0]}); + } + return; + } else if (CCW(v[0], v[3], v[2], precision_) <= 0 || + CCW(v[1], v[2], v[3], precision_) <= 0) { + return; + } + // Normal path + SwapEdge(); + visited[edge] = tag; + visited[pair] = tag; + edgeSwapStack.insert(edgeSwapStack.end(), + {halfedge_[tri1edge[0]].pairedHalfedge, + halfedge_[tri0edge[1]].pairedHalfedge}); +} + +void Manifold::Impl::SplitPinchedVerts() { + ZoneScoped; + std::vector vertProcessed(NumVert(), false); + std::vector halfedgeProcessed(halfedge_.size(), false); + for (int i = 0; i < halfedge_.size(); ++i) { + if (halfedgeProcessed[i]) continue; + int vert = halfedge_[i].startVert; + if (vertProcessed[vert]) { + vertPos_.push_back(vertPos_[vert]); + vert = NumVert() - 1; + } else { + vertProcessed[vert] = true; + } + ForVert(i, [this, &halfedgeProcessed, vert](int current) { + halfedgeProcessed[current] = true; + halfedge_[current].startVert = vert; + halfedge_[halfedge_[current].pairedHalfedge].endVert = vert; + }); + } +} +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/face_op.cpp b/thirdparty/manifold/src/manifold/src/face_op.cpp new file mode 100644 index 000000000000..29bc90488bf1 --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/face_op.cpp @@ -0,0 +1,321 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
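[When the swap above introduces a new property vertex, its values are linearly interpolated between two endpoints with the fraction `a` derived from the edge lengths. A tiny sketch of that append-and-interpolate step on a flat property buffer; the layout is assumed and the values are hypothetical.]

#include <cstdio>
#include <vector>

int main() {
  // Flat buffer, numProp floats per property vertex, as in the code above.
  const int numProp = 3;  // e.g. a UV pair plus one scalar (hypothetical)
  std::vector<float> prop = {0, 0, 0,   // property vertex 0
                             1, 2, 4};  // property vertex 1
  const float a = 0.75f;  // fractional position of the new vertex
  // Append one interpolated property vertex: a*p0 + (1-a)*p1.
  const int p0 = 0, p1 = 1, newIdx = int(prop.size()) / numProp;
  for (int p = 0; p < numProp; ++p)
    prop.push_back(a * prop[numProp * p0 + p] +
                   (1 - a) * prop[numProp * p1 + p]);
  std::printf("new property vertex %d: %g %g %g\n", newIdx,
              prop[3 * newIdx], prop[3 * newIdx + 1], prop[3 * newIdx + 2]);
}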
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#if MANIFOLD_PAR == 'T' && __has_include() +#include +#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1 +#include +#endif +#include + +#include "impl.h" +#include "polygon.h" + +namespace manifold { + +using GeneralTriangulation = std::function(int)>; +using AddTriangle = std::function; + +/** + * Triangulates the faces. In this case, the halfedge_ vector is not yet a set + * of triangles as required by this data structure, but is instead a set of + * general faces with the input faceEdge vector having length of the number of + * faces + 1. The values are indicies into the halfedge_ vector for the first + * edge of each face, with the final value being the length of the halfedge_ + * vector itself. Upon return, halfedge_ has been lengthened and properly + * represents the mesh as a set of triangles as usual. In this process the + * faceNormal_ values are retained, repeated as necessary. + */ +void Manifold::Impl::Face2Tri(const Vec& faceEdge, + const Vec& halfedgeRef) { + ZoneScoped; + Vec triVerts; + Vec triNormal; + Vec& triRef = meshRelation_.triRef; + triRef.resize(0); + auto processFace = [&](GeneralTriangulation general, AddTriangle addTri, + int face) { + const int firstEdge = faceEdge[face]; + const int lastEdge = faceEdge[face + 1]; + const int numEdge = lastEdge - firstEdge; + ASSERT(numEdge >= 3, topologyErr, "face has less than three edges."); + const glm::vec3 normal = faceNormal_[face]; + + if (numEdge == 3) { // Single triangle + int mapping[3] = {halfedge_[firstEdge].startVert, + halfedge_[firstEdge + 1].startVert, + halfedge_[firstEdge + 2].startVert}; + glm::ivec3 tri(halfedge_[firstEdge].startVert, + halfedge_[firstEdge + 1].startVert, + halfedge_[firstEdge + 2].startVert); + glm::ivec3 ends(halfedge_[firstEdge].endVert, + halfedge_[firstEdge + 1].endVert, + halfedge_[firstEdge + 2].endVert); + if (ends[0] == tri[2]) { + std::swap(tri[1], tri[2]); + std::swap(ends[1], ends[2]); + } + ASSERT(ends[0] == tri[1] && ends[1] == tri[2] && ends[2] == tri[0], + topologyErr, "These 3 edges do not form a triangle!"); + + addTri(face, tri, normal, halfedgeRef[firstEdge]); + } else if (numEdge == 4) { // Pair of triangles + int mapping[4] = {halfedge_[firstEdge].startVert, + halfedge_[firstEdge + 1].startVert, + halfedge_[firstEdge + 2].startVert, + halfedge_[firstEdge + 3].startVert}; + const glm::mat3x2 projection = GetAxisAlignedProjection(normal); + auto triCCW = [&projection, this](const glm::ivec3 tri) { + return CCW(projection * this->vertPos_[tri[0]], + projection * this->vertPos_[tri[1]], + projection * this->vertPos_[tri[2]], precision_) >= 0; + }; + + glm::ivec3 tri0(halfedge_[firstEdge].startVert, + halfedge_[firstEdge].endVert, -1); + glm::ivec3 tri1(-1, -1, tri0[0]); + for (const int i : {1, 2, 3}) { + if (halfedge_[firstEdge + i].startVert == tri0[1]) { + tri0[2] = halfedge_[firstEdge + i].endVert; + tri1[0] = tri0[2]; + } + if (halfedge_[firstEdge + i].endVert == tri0[0]) { + tri1[1] = halfedge_[firstEdge + i].startVert; + } + } + ASSERT(glm::all(glm::greaterThanEqual(tri0, glm::ivec3(0))) && + glm::all(glm::greaterThanEqual(tri1, 
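[For the four-edge case below, both candidate diagonals of the quad are tried; a split is kept only if both resulting triangles are CCW, and the shorter diagonal wins when both splits are valid. A standalone sketch of that decision using plain 2-D structs, not the library's types or its exact tie-break arithmetic.]

#include <cstdio>

struct V2 { float x, y; };
float Cross(V2 o, V2 a, V2 b) {
  return (a.x - o.x) * (b.y - o.y) - (b.x - o.x) * (a.y - o.y);
}

int main() {
  // Quad v0..v3 in boundary order; split along 0-2 or along 1-3.
  V2 v[4] = {{0, 0}, {2, 0}, {2.2f, 1}, {0, 1}};
  bool diag02ok = Cross(v[0], v[1], v[2]) > 0 && Cross(v[0], v[2], v[3]) > 0;
  bool diag13ok = Cross(v[1], v[2], v[3]) > 0 && Cross(v[1], v[3], v[0]) > 0;
  auto len2 = [&](int a, int b) {
    float dx = v[a].x - v[b].x, dy = v[a].y - v[b].y;
    return dx * dx + dy * dy;
  };
  // Convex quads admit both splits: prefer the shorter diagonal.
  bool use02 = diag02ok && (!diag13ok || len2(0, 2) < len2(1, 3));
  std::printf("split along %s\n", use02 ? "0-2" : "1-3");
}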
glm::ivec3(0))), + topologyErr, "non-manifold quad!"); + bool firstValid = triCCW(tri0) && triCCW(tri1); + tri0[2] = tri1[1]; + tri1[2] = tri0[1]; + bool secondValid = triCCW(tri0) && triCCW(tri1); + + if (!secondValid) { + tri0[2] = tri1[0]; + tri1[2] = tri0[0]; + } else if (firstValid) { + glm::vec3 firstCross = vertPos_[tri0[0]] - vertPos_[tri1[0]]; + glm::vec3 secondCross = vertPos_[tri0[1]] - vertPos_[tri1[1]]; + if (glm::dot(firstCross, firstCross) < + glm::dot(secondCross, secondCross)) { + tri0[2] = tri1[0]; + tri1[2] = tri0[0]; + } + } + + for (const auto& tri : {tri0, tri1}) { + addTri(face, tri, normal, halfedgeRef[firstEdge]); + } + } else { // General triangulation + for (const auto& tri : general(face)) { + addTri(face, tri, normal, halfedgeRef[firstEdge]); + } + } + }; + auto generalTriangulation = [&](int face) { + const glm::vec3 normal = faceNormal_[face]; + const glm::mat3x2 projection = GetAxisAlignedProjection(normal); + const PolygonsIdx polys = + Face2Polygons(halfedge_.cbegin() + faceEdge[face], + halfedge_.cbegin() + faceEdge[face + 1], projection); + return TriangulateIdx(polys, precision_); + }; +#if MANIFOLD_PAR == 'T' && __has_include() + tbb::task_group group; + // map from face to triangle + tbb::concurrent_unordered_map> results; + Vec triCount(faceEdge.size()); + triCount.back() = 0; + // precompute number of triangles per face, and launch async tasks to + // triangulate complex faces + for_each(autoPolicy(faceEdge.size()), countAt(0_z), + countAt(faceEdge.size() - 1), [&](size_t face) { + triCount[face] = faceEdge[face + 1] - faceEdge[face] - 2; + ASSERT(triCount[face] >= 1, topologyErr, + "face has less than three edges."); + if (triCount[face] > 2) + group.run([&, face] { + std::vector newTris = generalTriangulation(face); + triCount[face] = newTris.size(); + results[face] = std::move(newTris); + }); + }); + group.wait(); + // prefix sum computation (assign unique index to each face) and preallocation + exclusive_scan(autoPolicy(triCount.size()), triCount.begin(), triCount.end(), + triCount.begin(), 0_z); + if (triCount.back() >= std::numeric_limits::max()) + throw std::out_of_range("too many triangles"); + triVerts.resize(triCount.back()); + triNormal.resize(triCount.back()); + triRef.resize(triCount.back()); + + auto processFace2 = std::bind( + processFace, [&](size_t face) { return std::move(results[face]); }, + [&](int face, glm::ivec3 tri, glm::vec3 normal, TriRef r) { + triVerts[triCount[face]] = tri; + triNormal[triCount[face]] = normal; + triRef[triCount[face]] = r; + triCount[face]++; + }, + std::placeholders::_1); + // set triangles in parallel + for_each(autoPolicy(faceEdge.size()), countAt(0_z), + countAt(faceEdge.size() - 1), processFace2); +#else + triVerts.reserve(faceEdge.size()); + triNormal.reserve(faceEdge.size()); + triRef.reserve(faceEdge.size()); + auto processFace2 = std::bind( + processFace, generalTriangulation, + [&](int _face, glm::ivec3 tri, glm::vec3 normal, TriRef r) { + triVerts.push_back(tri); + triNormal.push_back(normal); + triRef.push_back(r); + }, + std::placeholders::_1); + for (int face = 0; face < faceEdge.size() - 1; ++face) { + processFace2(face); + } +#endif + + faceNormal_ = std::move(triNormal); + CreateHalfedges(triVerts); +} + +/** + * Returns a set of 2D polygons formed by the input projection of the vertices + * of the list of Halfedges, which must be an even-manifold, meaning each vert + * must be referenced the same number of times as a startVert and endVert. 
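[The parallel path above sizes its output with an exclusive prefix sum: per-face triangle counts become per-face write offsets, so every face can emit its triangles into a shared array independently. A minimal illustration of that step with std::exclusive_scan.]

#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  // Triangles produced per face (numEdges - 2 for a simple polygon);
  // the trailing zero slot will receive the grand total.
  std::vector<size_t> triCount = {1, 2, 5, 1, 0};
  // Exclusive prefix sum in place: counts become write offsets.
  std::exclusive_scan(triCount.begin(), triCount.end(), triCount.begin(),
                      size_t{0});
  for (size_t off : triCount) std::printf("%zu ", off);  // 0 1 3 8 9
  std::printf("\n");
}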
+ */ +PolygonsIdx Manifold::Impl::Face2Polygons(VecView::IterC start, + VecView::IterC end, + glm::mat3x2 projection) const { + std::multimap vert_edge; + for (auto edge = start; edge != end; ++edge) { + vert_edge.emplace(std::make_pair(edge->startVert, edge - start)); + } + + PolygonsIdx polys; + int startEdge = 0; + int thisEdge = startEdge; + while (1) { + if (thisEdge == startEdge) { + if (vert_edge.empty()) break; + startEdge = vert_edge.begin()->second; + thisEdge = startEdge; + polys.push_back({}); + } + int vert = (start + thisEdge)->startVert; + polys.back().push_back({projection * vertPos_[vert], vert}); + const auto result = vert_edge.find((start + thisEdge)->endVert); + ASSERT(result != vert_edge.end(), topologyErr, "non-manifold edge"); + thisEdge = result->second; + vert_edge.erase(result); + } + return polys; +} + +CrossSection Manifold::Impl::Slice(float height) const { + Box plane = bBox_; + plane.min.z = plane.max.z = height; + Vec query; + query.push_back(plane); + const SparseIndices collisions = + collider_.Collisions(query.cview()); + + std::unordered_set tris; + for (int i = 0; i < collisions.size(); ++i) { + const int tri = collisions.Get(i, 1); + float min = std::numeric_limits::infinity(); + float max = -std::numeric_limits::infinity(); + for (const int j : {0, 1, 2}) { + const float z = vertPos_[halfedge_[3 * tri + j].startVert].z; + min = glm::min(min, z); + max = glm::max(max, z); + } + + if (min <= height && max > height) { + tris.insert(tri); + } + } + + Polygons polys; + while (!tris.empty()) { + const int startTri = *tris.begin(); + SimplePolygon poly; + + int k = 0; + for (const int j : {0, 1, 2}) { + if (vertPos_[halfedge_[3 * startTri + j].startVert].z > height && + vertPos_[halfedge_[3 * startTri + Next3(j)].startVert].z <= height) { + k = Next3(j); + break; + } + } + + int tri = startTri; + do { + tris.erase(tris.find(tri)); + if (vertPos_[halfedge_[3 * tri + k].endVert].z <= height) { + k = Next3(k); + } + + Halfedge up = halfedge_[3 * tri + k]; + const glm::vec3 below = vertPos_[up.startVert]; + const glm::vec3 above = vertPos_[up.endVert]; + const float a = (height - below.z) / (above.z - below.z); + poly.push_back(glm::vec2(glm::mix(below, above, a))); + + const int pair = up.pairedHalfedge; + tri = pair / 3; + k = Next3(pair % 3); + } while (tri != startTri); + + polys.push_back(poly); + } + + return CrossSection(polys); +} + +CrossSection Manifold::Impl::Project() const { + const glm::mat3x2 projection = GetAxisAlignedProjection({0, 0, 1}); + auto policy = autoPolicy(halfedge_.size()); + + Vec cusps(NumEdge()); + cusps.resize(copy_if( + policy, halfedge_.cbegin(), halfedge_.cend(), cusps.begin(), + [&](Halfedge edge) { + return faceNormal_[edge.face].z >= 0 && + faceNormal_[halfedge_[edge.pairedHalfedge].face].z < + 0; + }) - + cusps.begin()); + + PolygonsIdx polysIndexed = + Face2Polygons(cusps.cbegin(), cusps.cend(), projection); + + Polygons polys; + for (const auto& poly : polysIndexed) { + SimplePolygon simple; + for (const PolyVert& polyVert : poly) { + simple.push_back(polyVert.pos); + } + polys.push_back(simple); + } + + return CrossSection(polys).Simplify(precision_); +} +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/impl.cpp b/thirdparty/manifold/src/manifold/src/impl.cpp new file mode 100644 index 000000000000..ed8373ce1158 --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/impl.cpp @@ -0,0 +1,956 @@ +// Copyright 2021 The Manifold Authors. 
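[Face2Polygons above chains directed edges into closed loops by looking up each edge's endVert in a multimap keyed on startVert and erasing entries as it goes. A self-contained sketch of that loop-peeling on a toy edge list; it assumes well-formed (manifold) input where every endVert has an outgoing edge.]

#include <cstdio>
#include <map>
#include <vector>

int main() {
  // Directed edges of one face with a hole: two loops, as (start, end).
  std::vector<std::pair<int, int>> edges = {
      {0, 1}, {1, 2}, {2, 0},          // outer triangle
      {10, 12}, {12, 11}, {11, 10}};   // inner loop (hole)
  std::multimap<int, int> vertEdge;  // startVert -> edge index
  for (int i = 0; i < int(edges.size()); ++i)
    vertEdge.emplace(edges[i].first, i);
  // Peel off loops: follow endVert to the next edge until back at start.
  while (!vertEdge.empty()) {
    int start = vertEdge.begin()->second, e = start;
    std::printf("loop:");
    do {
      std::printf(" %d", edges[e].first);
      auto it = vertEdge.find(edges[e].second);  // valid for manifold input
      e = it->second;
      vertEdge.erase(it);
    } while (e != start);
    std::printf("\n");
  }
}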
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "impl.h" + +#include +#include +#include +#include + +#include "hashtable.h" +#include "mesh_fixes.h" +#include "par.h" +#include "svd.h" +#include "tri_dist.h" + +namespace { +using namespace manifold; + +constexpr uint64_t kRemove = std::numeric_limits::max(); + +void AtomicAddVec3(glm::vec3& target, const glm::vec3& add) { + for (int i : {0, 1, 2}) { + std::atomic& tar = reinterpret_cast&>(target[i]); + float old_val = tar.load(std::memory_order_relaxed); + while (!tar.compare_exchange_weak(old_val, old_val + add[i], + std::memory_order_relaxed)) + ; + } +} + +struct Normalize { + void operator()(glm::vec3& v) { v = SafeNormalize(v); } +}; + +struct Transform4x3 { + const glm::mat4x3 transform; + + glm::vec3 operator()(glm::vec3 position) { + return transform * glm::vec4(position, 1.0f); + } +}; + +struct AssignNormals { + VecView vertNormal; + VecView vertPos; + VecView halfedges; + const float precision; + const bool calculateTriNormal; + + void operator()(thrust::tuple in) { + glm::vec3& triNormal = thrust::get<0>(in); + const int face = thrust::get<1>(in); + + glm::ivec3 triVerts; + for (int i : {0, 1, 2}) triVerts[i] = halfedges[3 * face + i].startVert; + + glm::vec3 edge[3]; + for (int i : {0, 1, 2}) { + const int j = (i + 1) % 3; + edge[i] = glm::normalize(vertPos[triVerts[j]] - vertPos[triVerts[i]]); + } + + if (calculateTriNormal) { + triNormal = glm::normalize(glm::cross(edge[0], edge[1])); + if (isnan(triNormal.x)) triNormal = glm::vec3(0, 0, 1); + } + + // corner angles + glm::vec3 phi; + float dot = -glm::dot(edge[2], edge[0]); + phi[0] = dot >= 1 ? 0 : (dot <= -1 ? glm::pi() : glm::acos(dot)); + dot = -glm::dot(edge[0], edge[1]); + phi[1] = dot >= 1 ? 0 : (dot <= -1 ? glm::pi() : glm::acos(dot)); + phi[2] = glm::pi() - phi[0] - phi[1]; + + // assign weighted sum + for (int i : {0, 1, 2}) { + AtomicAddVec3(vertNormal[triVerts[i]], phi[i] * triNormal); + } + } +}; + +struct Tri2Halfedges { + VecView halfedges; + VecView edges; + + void operator()(thrust::tuple in) { + const int tri = thrust::get<0>(in); + const glm::ivec3& triVerts = thrust::get<1>(in); + for (const int i : {0, 1, 2}) { + const int j = (i + 1) % 3; + const int edge = 3 * tri + i; + halfedges[edge] = {triVerts[i], triVerts[j], -1, tri}; + // Sort the forward halfedges in front of the backward ones by setting the + // highest-order bit. + edges[edge] = glm::uint64_t(triVerts[i] < triVerts[j] ? 
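[AtomicAddVec3 above emulates atomic float addition with a compare-exchange retry loop. A standalone version of the same idea on a genuine std::atomic<float>; note that compare_exchange_weak reloads the expected value on failure, which is what makes the retry loop correct.]

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

void AtomicAdd(std::atomic<float>& target, float add) {
  float old = target.load(std::memory_order_relaxed);
  // On failure, `old` is refreshed with the current value, so the next
  // attempt adds onto the latest sum rather than a stale one.
  while (!target.compare_exchange_weak(old, old + add,
                                       std::memory_order_relaxed)) {
  }
}

int main() {
  std::atomic<float> sum{0.0f};
  std::vector<std::thread> pool;
  for (int t = 0; t < 4; ++t)
    pool.emplace_back([&] {
      for (int i = 0; i < 1000; ++i) AtomicAdd(sum, 1.0f);
    });
  for (auto& t : pool) t.join();
  std::printf("%g\n", sum.load());  // 4000
}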
1 : 0) << 63 | + ((glm::uint64_t)glm::min(triVerts[i], triVerts[j])) << 32 | + glm::max(triVerts[i], triVerts[j]); + } + } +}; + +struct LinkHalfedges { + VecView halfedges; + VecView ids; + const int numEdge; + + void operator()(int i) { + const int pair0 = ids[i]; + const int pair1 = ids[i + numEdge]; + halfedges[pair0].pairedHalfedge = pair1; + halfedges[pair1].pairedHalfedge = pair0; + } +}; + +struct MarkVerts { + VecView vert; + + void operator()(glm::ivec3 triVerts) { + for (int i : {0, 1, 2}) { + reinterpret_cast*>(&vert[triVerts[i]]) + ->store(1, std::memory_order_relaxed); + } + } +}; + +struct ReindexTriVerts { + VecView old2new; + + void operator()(glm::ivec3& triVerts) { + for (int i : {0, 1, 2}) { + triVerts[i] = old2new[triVerts[i]]; + } + } +}; + +struct InitializeTriRef { + const int meshID; + VecView halfedge; + + void operator()(thrust::tuple inOut) { + TriRef& baryRef = thrust::get<0>(inOut); + int tri = thrust::get<1>(inOut); + + baryRef.meshID = meshID; + baryRef.originalID = meshID; + baryRef.tri = tri; + } +}; + +struct UpdateMeshID { + const HashTableD meshIDold2new; + + void operator()(TriRef& ref) { ref.meshID = meshIDold2new[ref.meshID]; } +}; + +struct CoplanarEdge { + VecView triArea; + VecView halfedge; + VecView vertPos; + VecView triRef; + VecView triProp; + VecView prop; + VecView propTol; + const int numProp; + const float precision; + + // FIXME: race condition + void operator()( + thrust::tuple&, thrust::pair&, int> + inOut) { + thrust::pair& face2face = thrust::get<0>(inOut); + thrust::pair& vert2vert = thrust::get<1>(inOut); + const int edgeIdx = thrust::get<2>(inOut); + + const Halfedge edge = halfedge[edgeIdx]; + const Halfedge pair = halfedge[edge.pairedHalfedge]; + + if (triRef[edge.face].meshID != triRef[pair.face].meshID) return; + + const glm::vec3 base = vertPos[edge.startVert]; + const int baseNum = edgeIdx - 3 * edge.face; + const int jointNum = edge.pairedHalfedge - 3 * pair.face; + + if (numProp > 0) { + const int prop0 = triProp[edge.face][baseNum]; + const int prop1 = triProp[pair.face][jointNum == 2 ? 0 : jointNum + 1]; + bool propEqual = true; + for (int p = 0; p < numProp; ++p) { + if (glm::abs(prop[numProp * prop0 + p] - prop[numProp * prop1 + p]) > + propTol[p]) { + propEqual = false; + break; + } + } + if (propEqual) { + vert2vert.first = prop0; + vert2vert.second = prop1; + } + } + + if (!edge.IsForward()) return; + + const int edgeNum = baseNum == 0 ? 2 : baseNum - 1; + const int pairNum = jointNum == 0 ? 
2 : jointNum - 1; + const glm::vec3 jointVec = vertPos[pair.startVert] - base; + const glm::vec3 edgeVec = + vertPos[halfedge[3 * edge.face + edgeNum].startVert] - base; + const glm::vec3 pairVec = + vertPos[halfedge[3 * pair.face + pairNum].startVert] - base; + + const float length = glm::max(glm::length(jointVec), glm::length(edgeVec)); + const float lengthPair = + glm::max(glm::length(jointVec), glm::length(pairVec)); + glm::vec3 normal = glm::cross(jointVec, edgeVec); + const float area = glm::length(normal); + const float areaPair = glm::length(glm::cross(pairVec, jointVec)); + triArea[edge.face] = area; + triArea[pair.face] = areaPair; + // Don't link degenerate triangles + if (area < length * precision || areaPair < lengthPair * precision) return; + + const float volume = glm::abs(glm::dot(normal, pairVec)); + // Only operate on coplanar triangles + if (volume > glm::max(area, areaPair) * precision) return; + + // Check property linearity + if (area > 0) { + normal /= area; + for (int i = 0; i < numProp; ++i) { + const float scale = precision / propTol[i]; + + const float baseProp = prop[numProp * triProp[edge.face][baseNum] + i]; + const float jointProp = + prop[numProp * triProp[pair.face][jointNum] + i]; + const float edgeProp = prop[numProp * triProp[edge.face][edgeNum] + i]; + const float pairProp = prop[numProp * triProp[pair.face][pairNum] + i]; + + const glm::vec3 iJointVec = + jointVec + normal * scale * (jointProp - baseProp); + const glm::vec3 iEdgeVec = + edgeVec + normal * scale * (edgeProp - baseProp); + const glm::vec3 iPairVec = + pairVec + normal * scale * (pairProp - baseProp); + + glm::vec3 cross = glm::cross(iJointVec, iEdgeVec); + const float areaP = glm::max( + glm::length(cross), glm::length(glm::cross(iPairVec, iJointVec))); + const float volumeP = glm::abs(glm::dot(cross, iPairVec)); + // Only operate on consistent triangles + if (volumeP > areaP * precision) return; + } + } + + face2face.first = edge.face; + face2face.second = pair.face; + } +}; + +struct CheckCoplanarity { + VecView comp2tri; + VecView halfedge; + VecView vertPos; + std::vector* components; + const float precision; + + void operator()(int tri) { + const int component = (*components)[tri]; + const int referenceTri = comp2tri[component]; + if (referenceTri < 0 || referenceTri == tri) return; + + const glm::vec3 origin = vertPos[halfedge[3 * referenceTri].startVert]; + const glm::vec3 normal = glm::normalize( + glm::cross(vertPos[halfedge[3 * referenceTri + 1].startVert] - origin, + vertPos[halfedge[3 * referenceTri + 2].startVert] - origin)); + + for (const int i : {0, 1, 2}) { + const glm::vec3 vert = vertPos[halfedge[3 * tri + i].startVert]; + // If any component vertex is not coplanar with the component's reference + // triangle, unmark the entire component so that none of its triangles are + // marked coplanar. 
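[The coplanarity test above compares a tetrahedron volume, a scalar triple product, against the triangle areas scaled by precision. The same test in isolation, with a minimal vector type standing in for glm.]

#include <cmath>
#include <cstdio>

struct V3 { float x, y, z; };
V3 Sub(V3 a, V3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
V3 CrossP(V3 a, V3 b) {
  return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z,
          a.x * b.y - a.y * b.x};
}
float Dot(V3 a, V3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

int main() {
  // Two triangles sharing the edge base->joint: |(joint x edge) . pair| is
  // proportional to the volume of the tetrahedron they span, so
  // volume <= area * precision means coplanar within tolerance.
  V3 base = {0, 0, 0};
  V3 joint = {1, 0, 0}, edge = {0, 1, 0}, pair = {1, -1, 1e-8f};
  V3 n = CrossP(Sub(joint, base), Sub(edge, base));
  float volume = std::fabs(Dot(n, Sub(pair, base)));
  float area = std::sqrt(Dot(n, n));
  std::printf("coplanar: %s\n", volume <= area * 1e-6f ? "yes" : "no");
}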
+ if (glm::abs(glm::dot(normal, vert - origin)) > precision) { + reinterpret_cast*>(&comp2tri[component]) + ->store(-1, std::memory_order_relaxed); + break; + } + } + } +}; + +struct EdgeBox { + VecView vertPos; + + void operator()(thrust::tuple inout) { + const TmpEdge& edge = thrust::get<1>(inout); + thrust::get<0>(inout) = Box(vertPos[edge.first], vertPos[edge.second]); + } +}; + +int GetLabels(std::vector& components, + const Vec>& edges, int numNodes) { + UnionFind<> uf(numNodes); + for (auto edge : edges) { + if (edge.first == -1 || edge.second == -1) continue; + if (edge.first >= numNodes || edge.second >= numNodes) continue; + uf.unionXY(edge.first, edge.second); + } + + return uf.connectedComponents(components); +} + +void DedupePropVerts(manifold::Vec& triProp, + const Vec>& vert2vert) { + ZoneScoped; + std::vector vertLabels(vert2vert.size()); + const int numLabels = GetLabels(vertLabels, vert2vert, vert2vert.size()); + + std::vector label2vert(numLabels); + for (int v = 0; v < vert2vert.size(); ++v) { + if (vertLabels[v] < numLabels) { + label2vert[vertLabels[v]] = v; + } + } + for (int tri = 0; tri < triProp.size(); ++tri) { + for (int i : {0, 1, 2}) { + if (triProp[tri][i] < vertLabels.size()) { + triProp[tri][i] = label2vert[vertLabels[triProp[tri][i]]]; + } + } + } +} +} // namespace + +namespace manifold { + +std::atomic Manifold::Impl::meshIDCounter_(1); + +uint32_t Manifold::Impl::ReserveIDs(uint32_t n) { + return Manifold::Impl::meshIDCounter_.fetch_add(n, std::memory_order_relaxed); +} + +Manifold::Impl::Impl(const MeshGL& meshGL, + std::vector propertyTolerance) { + Mesh mesh; + mesh.precision = meshGL.precision; + const int numVert = meshGL.NumVert(); + const int numTri = meshGL.NumTri(); + + if (meshGL.numProp > 3 && + static_cast(numVert) * static_cast(meshGL.numProp - 3) >= + std::numeric_limits::max()) + throw std::out_of_range("mesh too large"); + + if (meshGL.numProp < 3) { + MarkFailure(Error::MissingPositionProperties); + return; + } + + mesh.triVerts.resize(numTri); + if (meshGL.mergeFromVert.size() != meshGL.mergeToVert.size()) { + MarkFailure(Error::MergeVectorsDifferentLengths); + return; + } + + if (!meshGL.runTransform.empty() && + 12 * meshGL.runOriginalID.size() != meshGL.runTransform.size()) { + MarkFailure(Error::TransformWrongLength); + return; + } + + if (!meshGL.runOriginalID.empty() && !meshGL.runIndex.empty() && + meshGL.runOriginalID.size() + 1 != meshGL.runIndex.size()) { + MarkFailure(Error::RunIndexWrongLength); + return; + } + + if (!meshGL.faceID.empty() && meshGL.faceID.size() != meshGL.NumTri()) { + MarkFailure(Error::FaceIDWrongLength); + return; + } + + std::vector prop2vert(numVert); + std::iota(prop2vert.begin(), prop2vert.end(), 0); + for (size_t i = 0; i < meshGL.mergeFromVert.size(); ++i) { + const int from = meshGL.mergeFromVert[i]; + const int to = meshGL.mergeToVert[i]; + if (from >= numVert || to >= numVert) { + MarkFailure(Error::MergeIndexOutOfBounds); + return; + } + prop2vert[from] = to; + } + for (size_t i = 0; i < numTri; ++i) { + for (const size_t j : {0, 1, 2}) { + const int vert = meshGL.triVerts[3 * i + j]; + if (vert < 0 || vert >= numVert) { + MarkFailure(Error::VertexOutOfBounds); + return; + } + mesh.triVerts[i][j] = prop2vert[vert]; + } + } + + MeshRelationD relation; + + if (meshGL.numProp > 3) { + relation.triProperties.resize(numTri); + for (size_t i = 0; i < numTri; ++i) { + for (const size_t j : {0, 1, 2}) { + relation.triProperties[i][j] = meshGL.triVerts[3 * i + j]; + } + } + } + + const int numProp = 
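[GetLabels below relies on a union-find structure to label connected components of the face-to-face graph. A minimal sketch of such a structure (a hypothetical path-halving variant, not the library's UnionFind type).]

#include <cstdio>
#include <numeric>
#include <vector>

struct UF {
  std::vector<int> parent;
  explicit UF(int n) : parent(n) {
    std::iota(parent.begin(), parent.end(), 0);
  }
  int Find(int x) {
    // Path halving: point every other node at its grandparent as we climb.
    while (parent[x] != x) x = parent[x] = parent[parent[x]];
    return x;
  }
  void Union(int a, int b) { parent[Find(a)] = Find(b); }
};

int main() {
  UF uf(5);
  uf.Union(0, 1);
  uf.Union(3, 4);
  // Components: {0,1} {2} {3,4}; the root is the component label.
  for (int i = 0; i < 5; ++i) std::printf("%d:%d ", i, uf.Find(i));
  std::printf("\n");
}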
meshGL.numProp - 3; + relation.numProp = numProp; + relation.properties.resize(meshGL.NumVert() * numProp); + // This will have unreferenced duplicate positions that will be removed by + // Impl::RemoveUnreferencedVerts(). + mesh.vertPos.resize(meshGL.NumVert()); + + for (int i = 0; i < meshGL.NumVert(); ++i) { + for (const int j : {0, 1, 2}) + mesh.vertPos[i][j] = meshGL.vertProperties[meshGL.numProp * i + j]; + for (int j = 0; j < numProp; ++j) + relation.properties[i * numProp + j] = + meshGL.vertProperties[meshGL.numProp * i + 3 + j]; + } + + mesh.halfedgeTangent.resize(meshGL.halfedgeTangent.size() / 4); + for (int i = 0; i < mesh.halfedgeTangent.size(); ++i) { + for (const int j : {0, 1, 2, 3}) + mesh.halfedgeTangent[i][j] = meshGL.halfedgeTangent[4 * i + j]; + } + + if (meshGL.runOriginalID.empty()) { + relation.originalID = Impl::ReserveIDs(1); + } else { + std::vector runIndex = meshGL.runIndex; + if (runIndex.empty()) { + runIndex = {0, 3 * meshGL.NumTri()}; + } + relation.triRef.resize(meshGL.NumTri()); + const int startID = Impl::ReserveIDs(meshGL.runOriginalID.size()); + for (int i = 0; i < meshGL.runOriginalID.size(); ++i) { + const int meshID = startID + i; + const int originalID = meshGL.runOriginalID[i]; + for (int tri = runIndex[i] / 3; tri < runIndex[i + 1] / 3; ++tri) { + TriRef& ref = relation.triRef[tri]; + ref.meshID = meshID; + ref.originalID = originalID; + ref.tri = meshGL.faceID.empty() ? tri : meshGL.faceID[tri]; + } + + if (meshGL.runTransform.empty()) { + relation.meshIDtransform[meshID] = {originalID}; + } else { + const float* m = meshGL.runTransform.data() + 12 * i; + relation.meshIDtransform[meshID] = { + originalID, + {m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8], m[9], m[10], + m[11]}}; + } + } + } + + *this = Impl(mesh, relation, propertyTolerance, !meshGL.faceID.empty()); + + // A Manifold created from an input mesh is never an original - the input is + // the original. + meshRelation_.originalID = -1; +} + +/** + * Create a manifold from an input triangle Mesh. Will return an empty Manifold + * and set an Error Status if the Mesh is not manifold or otherwise invalid. + * TODO: update halfedgeTangent during SimplifyTopology. 
+ */ +Manifold::Impl::Impl(const Mesh& mesh, const MeshRelationD& relation, + const std::vector& propertyTolerance, + bool hasFaceIDs) + : vertPos_(mesh.vertPos), halfedgeTangent_(mesh.halfedgeTangent) { + if (mesh.triVerts.size() >= std::numeric_limits::max()) + throw std::out_of_range("mesh too large"); + meshRelation_ = {relation.originalID, relation.numProp, relation.properties, + relation.meshIDtransform}; + + Vec triVerts; + for (size_t i = 0; i < mesh.triVerts.size(); ++i) { + const glm::ivec3 tri = mesh.triVerts[i]; + // Remove topological degenerates + if (tri[0] != tri[1] && tri[1] != tri[2] && tri[2] != tri[0]) { + triVerts.push_back(tri); + if (relation.triRef.size() > 0) { + meshRelation_.triRef.push_back(relation.triRef[i]); + } + if (relation.triProperties.size() > 0) { + meshRelation_.triProperties.push_back(relation.triProperties[i]); + } + } + } + + if (!IsIndexInBounds(triVerts)) { + MarkFailure(Error::VertexOutOfBounds); + return; + } + RemoveUnreferencedVerts(triVerts); + + CalculateBBox(); + if (!IsFinite()) { + MarkFailure(Error::NonFiniteVertex); + return; + } + SetPrecision(mesh.precision); + + CreateHalfedges(triVerts); + if (!IsManifold()) { + MarkFailure(Error::NotManifold); + return; + } + + SplitPinchedVerts(); + + CalculateNormals(); + + InitializeOriginal(); + if (!hasFaceIDs) { + CreateFaces(propertyTolerance); + } + + SimplifyTopology(); + Finish(); +} + +/** + * Create either a unit tetrahedron, cube or octahedron. The cube is in the + * first octant, while the others are symmetric about the origin. + */ +Manifold::Impl::Impl(Shape shape, const glm::mat4x3 m) { + std::vector vertPos; + std::vector triVerts; + switch (shape) { + case Shape::Tetrahedron: + vertPos = {{-1.0f, -1.0f, 1.0f}, + {-1.0f, 1.0f, -1.0f}, + {1.0f, -1.0f, -1.0f}, + {1.0f, 1.0f, 1.0f}}; + triVerts = {{2, 0, 1}, {0, 3, 1}, {2, 3, 0}, {3, 2, 1}}; + break; + case Shape::Cube: + vertPos = {{0.0f, 0.0f, 0.0f}, // + {0.0f, 0.0f, 1.0f}, // + {0.0f, 1.0f, 0.0f}, // + {0.0f, 1.0f, 1.0f}, // + {1.0f, 0.0f, 0.0f}, // + {1.0f, 0.0f, 1.0f}, // + {1.0f, 1.0f, 0.0f}, // + {1.0f, 1.0f, 1.0f}}; + triVerts = {{1, 0, 4}, {2, 4, 0}, // + {1, 3, 0}, {3, 1, 5}, // + {3, 2, 0}, {3, 7, 2}, // + {5, 4, 6}, {5, 1, 4}, // + {6, 4, 2}, {7, 6, 2}, // + {7, 3, 5}, {7, 5, 6}}; + break; + case Shape::Octahedron: + vertPos = {{1.0f, 0.0f, 0.0f}, // + {-1.0f, 0.0f, 0.0f}, // + {0.0f, 1.0f, 0.0f}, // + {0.0f, -1.0f, 0.0f}, // + {0.0f, 0.0f, 1.0f}, // + {0.0f, 0.0f, -1.0f}}; + triVerts = {{0, 2, 4}, {1, 5, 3}, // + {2, 1, 4}, {3, 5, 0}, // + {1, 3, 4}, {0, 5, 2}, // + {3, 0, 4}, {2, 5, 1}}; + break; + } + vertPos_ = vertPos; + for (auto& v : vertPos_) v = m * glm::vec4(v, 1.0f); + CreateHalfedges(triVerts); + Finish(); + meshRelation_.originalID = ReserveIDs(1); + InitializeOriginal(); + CreateFaces(); +} + +void Manifold::Impl::RemoveUnreferencedVerts(Vec& triVerts) { + ZoneScoped; + Vec vertOld2New(NumVert() + 1, 0); + auto policy = autoPolicy(NumVert()); + for_each(policy, triVerts.cbegin(), triVerts.cend(), + MarkVerts({vertOld2New.view(1)})); + + const Vec oldVertPos = vertPos_; + vertPos_.resize(copy_if( + policy, oldVertPos.cbegin(), oldVertPos.cend(), + vertOld2New.cbegin() + 1, vertPos_.begin(), + thrust::identity()) - + vertPos_.begin()); + + inclusive_scan(policy, vertOld2New.begin() + 1, vertOld2New.end(), + vertOld2New.begin() + 1); + + for_each(policy, triVerts.begin(), triVerts.end(), + ReindexTriVerts({vertOld2New})); +} + +void Manifold::Impl::InitializeOriginal() { + const int meshID = 
meshRelation_.originalID; + // Don't initialize if it's not an original + if (meshID < 0) return; + meshRelation_.triRef.resize(NumTri()); + for_each_n(autoPolicy(NumTri()), + zip(meshRelation_.triRef.begin(), countAt(0)), NumTri(), + InitializeTriRef({meshID, halfedge_})); + meshRelation_.meshIDtransform.clear(); + meshRelation_.meshIDtransform[meshID] = {meshID}; +} + +void Manifold::Impl::CreateFaces(const std::vector& propertyTolerance) { + ZoneScoped; + Vec propertyToleranceD = + propertyTolerance.empty() ? Vec(meshRelation_.numProp, kTolerance) + : propertyTolerance; + + Vec> face2face(halfedge_.size(), {-1, -1}); + Vec> vert2vert(halfedge_.size(), {-1, -1}); + Vec triArea(NumTri()); + for_each_n( + autoPolicy(halfedge_.size()), + zip(face2face.begin(), vert2vert.begin(), countAt(0)), halfedge_.size(), + CoplanarEdge({triArea, halfedge_, vertPos_, meshRelation_.triRef, + meshRelation_.triProperties, meshRelation_.properties, + propertyToleranceD, meshRelation_.numProp, precision_})); + + if (meshRelation_.triProperties.size() > 0) { + DedupePropVerts(meshRelation_.triProperties, vert2vert); + } + + std::vector components; + const int numComponent = GetLabels(components, face2face, NumTri()); + + Vec comp2tri(numComponent, -1); + for (int tri = 0; tri < NumTri(); ++tri) { + const int comp = components[tri]; + const int current = comp2tri[comp]; + if (current < 0 || triArea[tri] > triArea[current]) { + comp2tri[comp] = tri; + triArea[comp] = triArea[tri]; + } + } + + for_each_n(autoPolicy(halfedge_.size()), countAt(0), NumTri(), + CheckCoplanarity( + {comp2tri, halfedge_, vertPos_, &components, precision_})); + + Vec& triRef = meshRelation_.triRef; + for (int tri = 0; tri < NumTri(); ++tri) { + const int referenceTri = comp2tri[components[tri]]; + if (referenceTri >= 0) { + triRef[tri].tri = referenceTri; + } + } +} + +/** + * Create the halfedge_ data structure from an input triVerts array like Mesh. + */ +void Manifold::Impl::CreateHalfedges(const Vec& triVerts) { + ZoneScoped; + const int numTri = triVerts.size(); + const int numHalfedge = 3 * numTri; + // drop the old value first to avoid copy + halfedge_.resize(0); + halfedge_.resize(numHalfedge); + Vec edge(numHalfedge); + Vec ids(numHalfedge); + auto policy = autoPolicy(numTri); + sequence(policy, ids.begin(), ids.end()); + for_each_n(policy, zip(countAt(0), triVerts.begin()), numTri, + Tri2Halfedges({halfedge_, edge})); + // Stable sort is required here so that halfedges from the same face are + // paired together (the triangles were created in face order). In some + // degenerate situations the triangulator can add the same internal edge in + // two different faces, causing this edge to not be 2-manifold. These are + // fixed by duplicating verts in SimplifyTopology. + stable_sort(policy, zip(edge.begin(), ids.begin()), + zip(edge.end(), ids.end()), + [](const thrust::tuple& a, + const thrust::tuple& b) { + return thrust::get<0>(a) < thrust::get<0>(b); + }); + // Once sorted, the first half of the range is the forward halfedges, which + // correspond to their backward pair at the same offset in the second half + // of the range. + for_each_n(policy, countAt(0), numHalfedge / 2, + LinkHalfedges({halfedge_, ids, numHalfedge / 2})); +} + +/** + * Does a full recalculation of the face bounding boxes, including updating + * the collider, but does not resort the faces. 
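[RemoveUnreferencedVerts above uses the classic mark / prefix-scan / reindex idiom: a 0/1 mark per vertex, a scan to derive new indices, then one pass to rewrite triangle references. A serial illustration; the real code runs this with parallel policies and offsets the mark array by one instead of subtracting.]

#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  // Verts 0..4; the triangles reference only 0, 2 and 4.
  std::vector<int> tri = {0, 2, 4};
  std::vector<int> mark(5, 0);
  for (int v : tri) mark[v] = 1;  // mark referenced verts
  std::vector<int> old2new(5);
  std::inclusive_scan(mark.begin(), mark.end(), old2new.begin());
  // The inclusive scan yields newIndex + 1 at every referenced vertex;
  // rewrite the triangle indices against the compacted numbering.
  for (int& v : tri) v = old2new[v] - 1;
  for (int v : tri) std::printf("%d ", v);  // 0 1 2
  std::printf("\n");
}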
+ */ +void Manifold::Impl::Update() { + CalculateBBox(); + Vec faceBox; + Vec faceMorton; + GetFaceBoxMorton(faceBox, faceMorton); + collider_.UpdateBoxes(faceBox); +} + +void Manifold::Impl::MarkFailure(Error status) { + bBox_ = Box(); + vertPos_.resize(0); + halfedge_.resize(0); + vertNormal_.resize(0); + faceNormal_.resize(0); + halfedgeTangent_.resize(0); + meshRelation_ = MeshRelationD(); + status_ = status; +} + +void Manifold::Impl::Warp(std::function warpFunc) { + WarpBatch([&warpFunc](VecView vecs) { + thrust::for_each(thrust::host, vecs.begin(), vecs.end(), warpFunc); + }); +} + +void Manifold::Impl::WarpBatch( + std::function)> warpFunc) { + warpFunc(vertPos_.view()); + CalculateBBox(); + if (!IsFinite()) { + MarkFailure(Error::NonFiniteVertex); + return; + } + Update(); + faceNormal_.resize(0); // force recalculation of triNormal + CalculateNormals(); + SetPrecision(); + CreateFaces(); + Finish(); +} + +Manifold::Impl Manifold::Impl::Transform(const glm::mat4x3& transform_) const { + ZoneScoped; + if (transform_ == glm::mat4x3(1.0f)) return *this; + auto policy = autoPolicy(NumVert()); + Impl result; + result.collider_ = collider_; + result.meshRelation_ = meshRelation_; + result.precision_ = precision_; + result.bBox_ = bBox_; + result.halfedge_ = halfedge_; + result.halfedgeTangent_.resize(halfedgeTangent_.size()); + + result.meshRelation_.originalID = -1; + for (auto& m : result.meshRelation_.meshIDtransform) { + m.second.transform = transform_ * glm::mat4(m.second.transform); + } + + result.vertPos_.resize(NumVert()); + result.faceNormal_.resize(faceNormal_.size()); + result.vertNormal_.resize(vertNormal_.size()); + transform(policy, vertPos_.begin(), vertPos_.end(), result.vertPos_.begin(), + Transform4x3({transform_})); + + glm::mat3 normalTransform = NormalTransform(transform_); + transform(policy, faceNormal_.begin(), faceNormal_.end(), + result.faceNormal_.begin(), TransformNormals({normalTransform})); + transform(policy, vertNormal_.begin(), vertNormal_.end(), + result.vertNormal_.begin(), TransformNormals({normalTransform})); + + const bool invert = glm::determinant(glm::mat3(transform_)) < 0; + + if (halfedgeTangent_.size() > 0) { + for_each_n(policy, zip(result.halfedgeTangent_.begin(), countAt(0)), + halfedgeTangent_.size(), + TransformTangents({glm::mat3(transform_), invert, + halfedgeTangent_, halfedge_})); + } + + if (invert) { + for_each_n(policy, zip(result.meshRelation_.triRef.begin(), countAt(0)), + result.NumTri(), FlipTris({result.halfedge_})); + } + + // This optimization does a cheap collider update if the transform is + // axis-aligned. + if (!result.collider_.Transform(transform_)) result.Update(); + + result.CalculateBBox(); + // Scale the precision by the norm of the 3x3 portion of the transform. + result.precision_ *= SpectralNorm(glm::mat3(transform_)); + // Maximum of inherited precision loss and translational precision loss. + result.SetPrecision(result.precision_); + return result; +} + +/** + * Sets the precision based on the bounding box, and limits its minimum value + * by the optional input. + */ +void Manifold::Impl::SetPrecision(float minPrecision) { + precision_ = MaxPrecision(minPrecision, bBox_); +} + +/** + * If face normals are already present, this function uses them to compute + * vertex normals (angle-weighted pseudo-normals); otherwise it also computes + * the face normals. 
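(Each vertex normal is the normalized sum of its incident face normals,
+ * weighted by the angle each triangle subtends at that vertex.)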
Face normals are only calculated when needed because
+ * nearly degenerate faces will accrue rounding error, while the Boolean can
+ * retain their original normal, which is more accurate and can help with
+ * merging coplanar faces.
+ *
+ * If the face normals have been invalidated by an operation like Warp(),
+ * ensure you do faceNormal_.resize(0) before calling this function to force
+ * recalculation.
+ */
+void Manifold::Impl::CalculateNormals() {
+  ZoneScoped;
+  vertNormal_.resize(NumVert());
+  auto policy = autoPolicy(NumTri());
+  fill(policy, vertNormal_.begin(), vertNormal_.end(), glm::vec3(0));
+  bool calculateTriNormal = false;
+  if (faceNormal_.size() != NumTri()) {
+    faceNormal_.resize(NumTri());
+    calculateTriNormal = true;
+  }
+  for_each_n(policy, zip(faceNormal_.begin(), countAt(0)), NumTri(),
+             AssignNormals({vertNormal_, vertPos_, halfedge_, precision_,
+                            calculateTriNormal}));
+  for_each(policy, vertNormal_.begin(), vertNormal_.end(), Normalize());
+}
+
+/**
+ * Remaps all the contained meshIDs to new unique values to represent new
+ * instances of these meshes.
+ */
+void Manifold::Impl::IncrementMeshIDs() {
+  HashTable<uint32_t> meshIDold2new(meshRelation_.meshIDtransform.size() * 2);
+  // Update keys of the transform map
+  std::map<int, Relation> oldTransforms;
+  std::swap(meshRelation_.meshIDtransform, oldTransforms);
+  const int numMeshIDs = oldTransforms.size();
+  int nextMeshID = ReserveIDs(numMeshIDs);
+  for (const auto& pair : oldTransforms) {
+    meshIDold2new.D().Insert(pair.first, nextMeshID);
+    meshRelation_.meshIDtransform[nextMeshID++] = pair.second;
+  }
+
+  const int numTri = NumTri();
+  for_each_n(autoPolicy(numTri), meshRelation_.triRef.begin(), numTri,
+             UpdateMeshID({meshIDold2new.D()}));
+}
+
+/**
+ * Returns a sparse array of the bounding box overlaps between the edges of
+ * the input manifold, Q, and the faces of this manifold. Returned indices
+ * only point to forward halfedges.
+ */
+SparseIndices Manifold::Impl::EdgeCollisions(const Impl& Q,
+                                             bool inverted) const {
+  ZoneScoped;
+  Vec<TmpEdge> edges = CreateTmpEdges(Q.halfedge_);
+  const int numEdge = edges.size();
+  Vec<Box> QedgeBB(numEdge);
+  auto policy = autoPolicy(numEdge);
+  for_each_n(policy, zip(QedgeBB.begin(), edges.cbegin()), numEdge,
+             EdgeBox({Q.vertPos_}));
+
+  SparseIndices q1p2(0);
+  if (inverted)
+    q1p2 = collider_.Collisions<false, true>(QedgeBB.cview());
+  else
+    q1p2 = collider_.Collisions<false, false>(QedgeBB.cview());
+
+  if (inverted)
+    for_each(policy, countAt(0_z), countAt(q1p2.size()),
+             ReindexEdge<true>({edges, q1p2}));
+  else
+    for_each(policy, countAt(0_z), countAt(q1p2.size()),
+             ReindexEdge<false>({edges, q1p2}));
+  return q1p2;
+}
+
+/**
+ * Returns a sparse array of the input vertices that project inside the XY
+ * bounding boxes of the faces of this manifold.
+ */
+SparseIndices Manifold::Impl::VertexCollisionsZ(
+    VecView<const glm::vec3> vertsIn, bool inverted) const {
+  ZoneScoped;
+  if (inverted)
+    return collider_.Collisions<false, true>(vertsIn);
+  else
+    return collider_.Collisions<false, false>(vertsIn);
+}
+
+/*
+ * Returns the minimum gap between two manifolds. Returns a float between
+ * 0 and searchLength.
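+ * The broad phase dilates the other manifold's face boxes by searchLength
+ * and queries this manifold's collider; the narrow phase then computes an
+ * exact triangle-triangle squared distance for each candidate pair and
+ * reduces with a min.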
+ */ +float Manifold::Impl::MinGap(const Manifold::Impl& other, + float searchLength) const { + ZoneScoped; + Vec faceBoxOther; + Vec faceMortonOther; + + other.GetFaceBoxMorton(faceBoxOther, faceMortonOther); + + transform(autoPolicy(faceBoxOther.size()), faceBoxOther.begin(), + faceBoxOther.end(), faceBoxOther.begin(), + [searchLength](const Box& box) { + return Box(box.min - glm::vec3(searchLength), + box.max + glm::vec3(searchLength)); + }); + + SparseIndices collisions = collider_.Collisions(faceBoxOther.cview()); + + float minDistanceSquared = transform_reduce( + autoPolicy(collisions.size()), thrust::counting_iterator(0), + thrust::counting_iterator(collisions.size()), + [&collisions, this, &other](int i) { + const int tri = collisions.Get(i, 1); + const int triOther = collisions.Get(i, 0); + + std::array p; + std::array q; + + for (const int j : {0, 1, 2}) { + p[j] = vertPos_[halfedge_[3 * tri + j].startVert]; + q[j] = other.vertPos_[other.halfedge_[3 * triOther + j].startVert]; + } + + return DistanceTriangleTriangleSquared(p, q); + }, + searchLength * searchLength, thrust::minimum()); + + return sqrt(minDistanceSquared); +}; + +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/impl.h b/thirdparty/manifold/src/manifold/src/impl.h new file mode 100644 index 000000000000..859e14905dc4 --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/impl.h @@ -0,0 +1,190 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include + +#include "collider.h" +#include "manifold.h" +#include "optional_assert.h" +#include "polygon.h" +#include "shared.h" +#include "sparse.h" +#include "utils.h" +#include "vec.h" + +namespace manifold { + +/** @ingroup Private */ +struct Manifold::Impl { + struct Relation { + int originalID = -1; + glm::mat4x3 transform = glm::mat4x3(1); + bool backSide = false; + }; + struct MeshRelationD { + /// The originalID of this Manifold if it is an original; -1 otherwise. 
+ int originalID = -1; + int numProp = 0; + Vec properties; + std::map meshIDtransform; + Vec triRef; + Vec triProperties; + }; + struct BaryIndices { + int tri, start4, end4; + }; + + Box bBox_; + float precision_ = -1; + Error status_ = Error::NoError; + Vec vertPos_; + Vec halfedge_; + Vec vertNormal_; + Vec faceNormal_; + Vec halfedgeTangent_; + MeshRelationD meshRelation_; + Collider collider_; + + static std::atomic meshIDCounter_; + static uint32_t ReserveIDs(uint32_t); + + Impl() {} + enum class Shape { Tetrahedron, Cube, Octahedron }; + Impl(Shape, const glm::mat4x3 = glm::mat4x3(1)); + + Impl(const MeshGL&, std::vector propertyTolerance = {}); + Impl(const Mesh&, const MeshRelationD& relation, + const std::vector& propertyTolerance = {}, + bool hasFaceIDs = false); + + inline void ForVert(int halfedge, std::function func) { + int current = halfedge; + do { + current = NextHalfedge(halfedge_[current].pairedHalfedge); + func(current); + } while (current != halfedge); + } + + template + void ForVert(int halfedge, std::function transform, + std::function + binaryOp) { + T here = transform(halfedge); + int current = halfedge; + do { + const int nextHalfedge = NextHalfedge(halfedge_[current].pairedHalfedge); + const T next = transform(nextHalfedge); + binaryOp(current, here, next); + here = next; + current = nextHalfedge; + } while (current != halfedge); + } + + void CreateFaces(const std::vector& propertyTolerance = {}); + void RemoveUnreferencedVerts(Vec& triVerts); + void InitializeOriginal(); + void CreateHalfedges(const Vec& triVerts); + void CalculateNormals(); + void IncrementMeshIDs(); + + void Update(); + void MarkFailure(Error status); + void Warp(std::function warpFunc); + void WarpBatch(std::function)> warpFunc); + Impl Transform(const glm::mat4x3& transform) const; + SparseIndices EdgeCollisions(const Impl& B, bool inverted = false) const; + SparseIndices VertexCollisionsZ(VecView vertsIn, + bool inverted = false) const; + float MinGap(const Impl& other, float searchLength) const; + + bool IsEmpty() const { return NumVert() == 0; } + int NumVert() const { return vertPos_.size(); } + int NumEdge() const { return halfedge_.size() / 2; } + int NumTri() const { return halfedge_.size() / 3; } + int NumProp() const { return meshRelation_.numProp; } + int NumPropVert() const { + return NumProp() == 0 ? 
NumVert() + : meshRelation_.properties.size() / NumProp(); + } + + // properties.cu + Properties GetProperties() const; + void CalculateCurvature(int gaussianIdx, int meanIdx); + void CalculateBBox(); + bool IsFinite() const; + bool IsIndexInBounds(VecView triVerts) const; + void SetPrecision(float minPrecision = -1); + bool IsManifold() const; + bool Is2Manifold() const; + bool MatchesTriNormals() const; + int NumDegenerateTris() const; + + // sort.cu + void Finish(); + void SortVerts(); + void ReindexVerts(const Vec& vertNew2Old, int numOldVert); + void CompactProps(); + void GetFaceBoxMorton(Vec& faceBox, Vec& faceMorton) const; + void SortFaces(Vec& faceBox, Vec& faceMorton); + void GatherFaces(const Vec& faceNew2Old); + void GatherFaces(const Impl& old, const Vec& faceNew2Old); + + // face_op.cu + void Face2Tri(const Vec& faceEdge, const Vec& halfedgeRef); + PolygonsIdx Face2Polygons(VecView::IterC start, + VecView::IterC end, + glm::mat3x2 projection) const; + CrossSection Slice(float height) const; + CrossSection Project() const; + + // edge_op.cu + void SimplifyTopology(); + void DedupeEdge(int edge); + void CollapseEdge(int edge, std::vector& edges); + void RecursiveEdgeSwap(int edge, int& tag, std::vector& visited, + std::vector& edgeSwapStack, + std::vector& edges); + void RemoveIfFolded(int edge); + void PairUp(int edge0, int edge1); + void UpdateVert(int vert, int startEdge, int endEdge); + void FormLoop(int current, int end); + void CollapseTri(const glm::ivec3& triEdge); + void SplitPinchedVerts(); + + // subdivision.cpp + int GetNeighbor(int tri) const; + glm::ivec4 GetHalfedges(int tri) const; + BaryIndices GetIndices(int halfedge) const; + void FillRetainedVerts(Vec& vertBary) const; + Vec Subdivide(std::function); + + // smoothing.cpp + bool IsInsideQuad(int halfedge) const; + bool IsMarkedInsideQuad(int halfedge) const; + glm::vec3 GetNormal(int halfedge, int normalIdx) const; + std::vector UpdateSharpenedEdges( + const std::vector&) const; + Vec FlatFaces() const; + Vec VertFlatFace(const Vec&) const; + std::vector SharpenEdges(float minSharpAngle, + float minSmoothness) const; + void SharpenTangent(int halfedge, float smoothness); + void SetNormals(int normalIdx, float minSharpAngle); + void LinearizeFlatTangents(); + void CreateTangents(int normalIdx); + void CreateTangents(std::vector); + void Refine(std::function); +}; +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/manifold.cpp b/thirdparty/manifold/src/manifold/src/manifold.cpp new file mode 100644 index 000000000000..4b9c46a2f1c1 --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/manifold.cpp @@ -0,0 +1,970 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
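+
+// A minimal usage sketch of the Boolean API this file implements (Cube and
+// Sphere factory signatures assumed from this library's constructors):
+//
+//   using namespace manifold;
+//   Manifold box = Manifold::Cube(glm::vec3(1.0f));
+//   Manifold ball = Manifold::Sphere(0.6f).Translate({0.5f, 0.5f, 0.5f});
+//   Manifold diff = box - ball;    // Boolean(ball, OpType::Subtract)
+//   Mesh out = diff.GetMesh();     // extract the result for rendering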
+ +#include +#include +#include + +#include "QuickHull.hpp" +#include "boolean3.h" +#include "csg_tree.h" +#include "impl.h" +#include "par.h" +#include "tri_dist.h" + +namespace { +using namespace manifold; +using namespace thrust::placeholders; + +ExecutionParams manifoldParams; + +struct MakeTri { + VecView halfedges; + + void operator()(thrust::tuple inOut) { + glm::ivec3& tri = thrust::get<0>(inOut); + const int face = 3 * thrust::get<1>(inOut); + + for (int i : {0, 1, 2}) { + tri[i] = halfedges[face + i].startVert; + } + } +}; + +struct UpdateProperties { + float* properties; + const int numProp; + const float* oldProperties; + const int numOldProp; + const glm::vec3* vertPos; + const glm::ivec3* triProperties; + const Halfedge* halfedges; + std::function propFunc; + + void operator()(int tri) { + for (int i : {0, 1, 2}) { + const int vert = halfedges[3 * tri + i].startVert; + const int propVert = triProperties[tri][i]; + propFunc(properties + numProp * propVert, vertPos[vert], + oldProperties + numOldProp * propVert); + } + } +}; + +Manifold Halfspace(Box bBox, glm::vec3 normal, float originOffset) { + normal = glm::normalize(normal); + Manifold cutter = + Manifold::Cube(glm::vec3(2.0f), true).Translate({1.0f, 0.0f, 0.0f}); + float size = glm::length(bBox.Center() - normal * originOffset) + + 0.5f * glm::length(bBox.Size()); + cutter = cutter.Scale(glm::vec3(size)).Translate({originOffset, 0.0f, 0.0f}); + float yDeg = glm::degrees(-glm::asin(normal.z)); + float zDeg = glm::degrees(glm::atan(normal.y, normal.x)); + return cutter.Rotate(0.0f, yDeg, zDeg); +} +} // namespace + +namespace manifold { + +/** + * Construct an empty Manifold. + * + */ +Manifold::Manifold() : pNode_{std::make_shared()} {} +Manifold::~Manifold() = default; +Manifold::Manifold(Manifold&&) noexcept = default; +Manifold& Manifold::operator=(Manifold&&) noexcept = default; + +Manifold::Manifold(const Manifold& other) : pNode_(other.pNode_) {} + +Manifold::Manifold(std::shared_ptr pNode) : pNode_(pNode) {} + +Manifold::Manifold(std::shared_ptr pImpl_) + : pNode_(std::make_shared(pImpl_)) {} + +Manifold Manifold::Invalid() { + auto pImpl_ = std::make_shared(); + pImpl_->status_ = Error::InvalidConstruction; + return Manifold(pImpl_); +} + +Manifold& Manifold::operator=(const Manifold& other) { + if (this != &other) { + pNode_ = other.pNode_; + } + return *this; +} + +CsgLeafNode& Manifold::GetCsgLeafNode() const { + if (pNode_->GetNodeType() != CsgNodeType::Leaf) { + pNode_ = pNode_->ToLeafNode(); + } + return *std::static_pointer_cast(pNode_); +} + +/** + * Convert a MeshGL into a Manifold, retaining its properties and merging only + * the positions according to the merge vectors. Will return an empty Manifold + * and set an Error Status if the result is not an oriented 2-manifold. Will + * collapse degenerate triangles and unnecessary vertices. + * + * All fields are read, making this structure suitable for a lossless round-trip + * of data from GetMeshGL. For multi-material input, use ReserveIDs to set a + * unique originalID for each material, and sort the materials into triangle + * runs. + * + * @param meshGL The input MeshGL. + * @param propertyTolerance A vector of precision values for each property + * beyond position. If specified, the propertyTolerance vector must have size = + * numProp - 3. This is the amount of interpolation error allowed before two + * neighboring triangles are considered to be on a property boundary edge. 
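For example, with numProp = 5 (position plus a hypothetical two-channel UV),
+ * propertyTolerance would have size 2, one tolerance per UV channel.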
+ * Property boundary edges will be retained across operations even if the + * triangles are coplanar. Defaults to 1e-5, which works well for most + * properties in the [-1, 1] range. + */ +Manifold::Manifold(const MeshGL& meshGL, + const std::vector& propertyTolerance) + : pNode_(std::make_shared( + std::make_shared(meshGL, propertyTolerance))) {} + +/** + * Convert a Mesh into a Manifold. Will return an empty Manifold + * and set an Error Status if the Mesh is not an oriented 2-manifold. Will + * collapse degenerate triangles and unnecessary vertices. + * + * @param mesh The input Mesh. + */ +Manifold::Manifold(const Mesh& mesh) { + Impl::MeshRelationD relation = {(int)ReserveIDs(1)}; + pNode_ = + std::make_shared(std::make_shared(mesh, relation)); +} + +/** + * This returns a Mesh of simple vectors of vertices and triangles suitable for + * saving or other operations outside of the context of this library. + */ +Mesh Manifold::GetMesh() const { + ZoneScoped; + const Impl& impl = *GetCsgLeafNode().GetImpl(); + + Mesh result; + result.precision = Precision(); + result.vertPos.insert(result.vertPos.end(), impl.vertPos_.begin(), + impl.vertPos_.end()); + result.vertNormal.insert(result.vertNormal.end(), impl.vertNormal_.begin(), + impl.vertNormal_.end()); + result.halfedgeTangent.insert(result.halfedgeTangent.end(), + impl.halfedgeTangent_.begin(), + impl.halfedgeTangent_.end()); + + result.triVerts.resize(NumTri()); + // note that `triVerts` is `std::vector`, so we cannot use thrust::device + thrust::for_each_n(thrust::host, zip(result.triVerts.begin(), countAt(0)), + NumTri(), MakeTri({impl.halfedge_})); + + return result; +} + +/** + * The most complete output of this library, returning a MeshGL that is designed + * to easily push into a renderer, including all interleaved vertex properties + * that may have been input. It also includes relations to all the input meshes + * that form a part of this result and the transforms applied to each. + * + * @param normalIdx If the original MeshGL inputs that formed this manifold had + * properties corresponding to normal vectors, you can specify which property + * channels these are (x, y, z), which will cause this output MeshGL to + * automatically update these normals according to the applied transforms and + * front/back side. Each channel must be >= 3 and < numProp, and all original + * MeshGLs must use the same channels for their normals. 
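+ *
+ * For instance, when the original properties held normals in channels 3-5
+ * (a sketch; the channel layout is whatever the caller used):
+ *
+ *   MeshGL gl = manifold.GetMeshGL(glm::ivec3(3, 4, 5));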
+ */ +MeshGL Manifold::GetMeshGL(glm::ivec3 normalIdx) const { + ZoneScoped; + const Impl& impl = *GetCsgLeafNode().GetImpl(); + + const int numProp = NumProp(); + const int numVert = NumPropVert(); + const int numTri = NumTri(); + + const bool isOriginal = impl.meshRelation_.originalID >= 0; + const bool updateNormals = + !isOriginal && glm::all(glm::greaterThan(normalIdx, glm::ivec3(2))); + + MeshGL out; + out.precision = Precision(); + out.numProp = 3 + numProp; + out.triVerts.resize(3 * numTri); + + const int numHalfedge = impl.halfedgeTangent_.size(); + out.halfedgeTangent.resize(4 * numHalfedge); + for (int i = 0; i < numHalfedge; ++i) { + const glm::vec4 t = impl.halfedgeTangent_[i]; + out.halfedgeTangent[4 * i] = t.x; + out.halfedgeTangent[4 * i + 1] = t.y; + out.halfedgeTangent[4 * i + 2] = t.z; + out.halfedgeTangent[4 * i + 3] = t.w; + } + + // Sort the triangles into runs + out.faceID.resize(numTri); + std::vector triNew2Old(numTri); + std::iota(triNew2Old.begin(), triNew2Old.end(), 0); + VecView triRef = impl.meshRelation_.triRef; + // Don't sort originals - keep them in order + if (!isOriginal) { + std::sort(triNew2Old.begin(), triNew2Old.end(), [triRef](int a, int b) { + return triRef[a].originalID == triRef[b].originalID + ? triRef[a].meshID < triRef[b].meshID + : triRef[a].originalID < triRef[b].originalID; + }); + } + + std::vector runNormalTransform; + + auto addRun = [updateNormals, isOriginal]( + MeshGL& out, std::vector& runNormalTransform, + int tri, const Impl::Relation& rel) { + out.runIndex.push_back(3 * tri); + out.runOriginalID.push_back(rel.originalID); + if (updateNormals) { + runNormalTransform.push_back(NormalTransform(rel.transform) * + (rel.backSide ? -1.0f : 1.0f)); + } + if (!isOriginal) { + for (const int col : {0, 1, 2, 3}) { + for (const int row : {0, 1, 2}) { + out.runTransform.push_back(rel.transform[col][row]); + } + } + } + }; + + auto meshIDtransform = impl.meshRelation_.meshIDtransform; + int lastID = -1; + for (int tri = 0; tri < numTri; ++tri) { + const int oldTri = triNew2Old[tri]; + const auto ref = triRef[oldTri]; + const int meshID = ref.meshID; + + out.faceID[tri] = ref.tri; + for (const int i : {0, 1, 2}) + out.triVerts[3 * tri + i] = impl.halfedge_[3 * oldTri + i].startVert; + + if (meshID != lastID) { + Impl::Relation rel; + auto it = meshIDtransform.find(meshID); + if (it != meshIDtransform.end()) rel = it->second; + addRun(out, runNormalTransform, tri, rel); + meshIDtransform.erase(meshID); + lastID = meshID; + } + } + // Add runs for originals that did not contribute any faces to the output + for (const auto& pair : meshIDtransform) { + addRun(out, runNormalTransform, numTri, pair.second); + } + out.runIndex.push_back(3 * numTri); + + // Early return for no props + if (numProp == 0) { + out.vertProperties.resize(3 * numVert); + for (int i = 0; i < numVert; ++i) { + const glm::vec3 v = impl.vertPos_[i]; + out.vertProperties[3 * i] = v.x; + out.vertProperties[3 * i + 1] = v.y; + out.vertProperties[3 * i + 2] = v.z; + } + return out; + } + + // Duplicate verts with different props + std::vector vert2idx(impl.NumVert(), -1); + std::vector> vertPropPair(impl.NumVert()); + out.vertProperties.reserve(numVert * static_cast(out.numProp)); + + for (int run = 0; run < out.runOriginalID.size(); ++run) { + for (int tri = out.runIndex[run] / 3; tri < out.runIndex[run + 1] / 3; + ++tri) { + const glm::ivec3 triProp = + impl.meshRelation_.triProperties[triNew2Old[tri]]; + for (const int i : {0, 1, 2}) { + const int prop = triProp[i]; + const int 
vert = out.triVerts[3 * tri + i]; + + auto& bin = vertPropPair[vert]; + bool bFound = false; + for (int k = 0; k < bin.size(); ++k) { + if (bin[k].x == prop) { + bFound = true; + out.triVerts[3 * tri + i] = bin[k].y; + break; + } + } + if (bFound) continue; + const int idx = out.vertProperties.size() / out.numProp; + out.triVerts[3 * tri + i] = idx; + bin.push_back({prop, idx}); + + for (int p : {0, 1, 2}) { + out.vertProperties.push_back(impl.vertPos_[vert][p]); + } + for (int p = 0; p < numProp; ++p) { + out.vertProperties.push_back( + impl.meshRelation_.properties[prop * numProp + p]); + } + + if (updateNormals) { + glm::vec3 normal; + const int start = out.vertProperties.size() - out.numProp; + for (int i : {0, 1, 2}) { + normal[i] = out.vertProperties[start + normalIdx[i]]; + } + normal = glm::normalize(runNormalTransform[run] * normal); + for (int i : {0, 1, 2}) { + out.vertProperties[start + normalIdx[i]] = normal[i]; + } + } + + if (vert2idx[vert] == -1) { + vert2idx[vert] = idx; + } else { + out.mergeFromVert.push_back(idx); + out.mergeToVert.push_back(vert2idx[vert]); + } + } + } + } + return out; +} + +/** + * Does the Manifold have any triangles? + */ +bool Manifold::IsEmpty() const { return GetCsgLeafNode().GetImpl()->IsEmpty(); } +/** + * Returns the reason for an input Mesh producing an empty Manifold. This Status + * only applies to Manifolds newly-created from an input Mesh - once they are + * combined into a new Manifold via operations, the status reverts to NoError, + * simply processing the problem mesh as empty. Likewise, empty meshes may still + * show NoError, for instance if they are small enough relative to their + * precision to be collapsed to nothing. + */ +Manifold::Error Manifold::Status() const { + return GetCsgLeafNode().GetImpl()->status_; +} +/** + * The number of vertices in the Manifold. + */ +int Manifold::NumVert() const { return GetCsgLeafNode().GetImpl()->NumVert(); } +/** + * The number of edges in the Manifold. + */ +int Manifold::NumEdge() const { return GetCsgLeafNode().GetImpl()->NumEdge(); } +/** + * The number of triangles in the Manifold. + */ +int Manifold::NumTri() const { return GetCsgLeafNode().GetImpl()->NumTri(); } +/** + * The number of properties per vertex in the Manifold. + */ +int Manifold::NumProp() const { return GetCsgLeafNode().GetImpl()->NumProp(); } +/** + * The number of property vertices in the Manifold. This will always be >= + * NumVert, as some physical vertices may be duplicated to account for different + * properties on different neighboring triangles. + */ +int Manifold::NumPropVert() const { + return GetCsgLeafNode().GetImpl()->NumPropVert(); +} + +/** + * Returns the axis-aligned bounding box of all the Manifold's vertices. + */ +Box Manifold::BoundingBox() const { return GetCsgLeafNode().GetImpl()->bBox_; } + +/** + * Returns the precision of this Manifold's vertices, which tracks the + * approximate rounding error over all the transforms and operations that have + * led to this state. Any triangles that are colinear within this precision are + * considered degenerate and removed. This is the value of ε defining + * [ε-valid](https://github.com/elalish/manifold/wiki/Manifold-Library#definition-of-%CE%B5-valid). + */ +float Manifold::Precision() const { + return GetCsgLeafNode().GetImpl()->precision_; +} + +/** + * The genus is a topological property of the manifold, representing the number + * of "handles". A sphere is 0, torus 1, etc. 
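The genus
+ * follows from the Euler characteristic chi = V - E + F via genus = 1 - chi/2;
+ * e.g. a triangulated cube has V = 8, E = 18, F = 12, so chi = 2 and genus 0.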
It is only meaningful for a single + * mesh, so it is best to call Decompose() first. + */ +int Manifold::Genus() const { + int chi = NumVert() - NumEdge() + NumTri(); + return 1 - chi / 2; +} + +/** + * Returns the surface area and volume of the manifold. + */ +Properties Manifold::GetProperties() const { + return GetCsgLeafNode().GetImpl()->GetProperties(); +} + +/** + * If this mesh is an original, this returns its meshID that can be referenced + * by product manifolds' MeshRelation. If this manifold is a product, this + * returns -1. + */ +int Manifold::OriginalID() const { + return GetCsgLeafNode().GetImpl()->meshRelation_.originalID; +} + +/** + * This function condenses all coplanar faces in the relation, and + * collapses those edges. In the process the relation to ancestor meshes is lost + * and this new Manifold is marked an original. Properties are preserved, so if + * they do not match across an edge, that edge will be kept. + */ +Manifold Manifold::AsOriginal() const { + auto newImpl = std::make_shared(*GetCsgLeafNode().GetImpl()); + newImpl->meshRelation_.originalID = ReserveIDs(1); + newImpl->InitializeOriginal(); + newImpl->CreateFaces(); + newImpl->SimplifyTopology(); + newImpl->Finish(); + return Manifold(std::make_shared(newImpl)); +} + +/** + * Returns the first of n sequential new unique mesh IDs for marking sets of + * triangles that can be looked up after further operations. Assign to + * MeshGL.runOriginalID vector. + */ +uint32_t Manifold::ReserveIDs(uint32_t n) { + return Manifold::Impl::ReserveIDs(n); +} + +/** + * The triangle normal vectors are saved over the course of operations rather + * than recalculated to avoid rounding error. This checks that triangles still + * match their normal vectors within Precision(). + */ +bool Manifold::MatchesTriNormals() const { + return GetCsgLeafNode().GetImpl()->MatchesTriNormals(); +} + +/** + * The number of triangles that are colinear within Precision(). This library + * attempts to remove all of these, but it cannot always remove all of them + * without changing the mesh by too much. + */ +int Manifold::NumDegenerateTris() const { + return GetCsgLeafNode().GetImpl()->NumDegenerateTris(); +} + +/** + * This is a checksum-style verification of the collider, simply returning the + * total number of edge-face bounding box overlaps between this and other. + * + * @param other A Manifold to overlap with. + */ +int Manifold::NumOverlaps(const Manifold& other) const { + SparseIndices overlaps = GetCsgLeafNode().GetImpl()->EdgeCollisions( + *other.GetCsgLeafNode().GetImpl()); + int num_overlaps = overlaps.size(); + + overlaps = other.GetCsgLeafNode().GetImpl()->EdgeCollisions( + *GetCsgLeafNode().GetImpl()); + return num_overlaps + overlaps.size(); +} + +/** + * Move this Manifold in space. This operation can be chained. Transforms are + * combined and applied lazily. + * + * @param v The vector to add to every vertex. + */ +Manifold Manifold::Translate(glm::vec3 v) const { + return Manifold(pNode_->Translate(v)); +} + +/** + * Scale this Manifold in space. This operation can be chained. Transforms are + * combined and applied lazily. + * + * @param v The vector to multiply every vertex by per component. + */ +Manifold Manifold::Scale(glm::vec3 v) const { + return Manifold(pNode_->Scale(v)); +} + +/** + * Applies an Euler angle rotation to the manifold, first about the X axis, then + * Y, then Z, in degrees. We use degrees so that we can minimize rounding error, + * and eliminate it completely for any multiples of 90 degrees. 
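(For example,
+ * Rotate(0, 0, 90) maps the +X axis exactly onto +Y.)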
Additionally,
+ * more efficient code paths are used to update the manifold when the
+ * transforms only rotate by multiples of 90 degrees. This operation can be
+ * chained. Transforms are combined and applied lazily.
+ *
+ * @param xDegrees First rotation, degrees about the X-axis.
+ * @param yDegrees Second rotation, degrees about the Y-axis.
+ * @param zDegrees Third rotation, degrees about the Z-axis.
+ */
+Manifold Manifold::Rotate(float xDegrees, float yDegrees,
+                          float zDegrees) const {
+  return Manifold(pNode_->Rotate(xDegrees, yDegrees, zDegrees));
+}
+
+/**
+ * Transform this Manifold in space. The first three columns form a 3x3 matrix
+ * transform and the last is a translation vector. This operation can be
+ * chained. Transforms are combined and applied lazily.
+ *
+ * @param m The affine transform matrix to apply to all the vertices.
+ */
+Manifold Manifold::Transform(const glm::mat4x3& m) const {
+  return Manifold(pNode_->Transform(m));
+}
+
+/**
+ * Mirror this Manifold over the plane described by the unit form of the given
+ * normal vector. If the length of the normal is zero, an empty Manifold is
+ * returned. This operation can be chained. Transforms are combined and
+ * applied lazily.
+ *
+ * @param normal The normal vector of the plane to be mirrored over.
+ */
+Manifold Manifold::Mirror(glm::vec3 normal) const {
+  if (glm::length(normal) == 0.) {
+    return Manifold();
+  }
+  auto n = glm::normalize(normal);
+  auto m = glm::mat4x3(glm::mat3(1.0f) - 2.0f * glm::outerProduct(n, n));
+  return Manifold(pNode_->Transform(m));
+}
+
+/**
+ * This function does not change the topology, but allows the vertices to be
+ * moved according to any arbitrary input function. It is easy to create a
+ * function that warps a geometrically valid object into one which overlaps,
+ * but that is not checked here, so it is up to the user to choose their
+ * function with discretion.
+ *
+ * @param warpFunc A function that modifies a given vertex position.
+ */
+Manifold Manifold::Warp(std::function<void(glm::vec3&)> warpFunc) const {
+  auto pImpl = std::make_shared<Impl>(*GetCsgLeafNode().GetImpl());
+  pImpl->Warp(warpFunc);
+  return Manifold(std::make_shared<CsgLeafNode>(pImpl));
+}
+
+/**
+ * Same as Manifold::Warp but calls warpFunc with a VecView<glm::vec3>, which
+ * is roughly equivalent to a std::span<glm::vec3> pointing to all vec3
+ * elements to be modified in-place.
+ *
+ * @param warpFunc A function that modifies multiple vertex positions.
+ */
+Manifold Manifold::WarpBatch(
+    std::function<void(VecView<glm::vec3>)> warpFunc) const {
+  auto pImpl = std::make_shared<Impl>(*GetCsgLeafNode().GetImpl());
+  pImpl->WarpBatch(warpFunc);
+  return Manifold(std::make_shared<CsgLeafNode>(pImpl));
+}
+
+/**
+ * Create a new copy of this manifold with updated vertex properties by
+ * supplying a function that takes the existing position and properties as
+ * input. You may specify any number of output properties, allowing creation
+ * and removal of channels. Note: undefined behavior will result if you read
+ * past the number of input properties or write past the number of output
+ * properties.
+ *
+ * @param numProp The new number of properties per vertex.
+ * @param propFunc A function that modifies the properties of a given vertex.
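+ *
+ * A sketch that writes a constant color into three new channels (names
+ * illustrative only):
+ *
+ *   Manifold colored = m.SetProperties(
+ *       3, [](float* newProp, glm::vec3 pos, const float* oldProp) {
+ *         newProp[0] = 1.0f;  // R
+ *         newProp[1] = 0.0f;  // G
+ *         newProp[2] = 0.0f;  // B
+ *       });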
+ */ +Manifold Manifold::SetProperties( + int numProp, std::function + propFunc) const { + auto pImpl = std::make_shared(*GetCsgLeafNode().GetImpl()); + const int oldNumProp = NumProp(); + const Vec oldProperties = pImpl->meshRelation_.properties; + + auto& triProperties = pImpl->meshRelation_.triProperties; + if (numProp == 0) { + triProperties.resize(0); + pImpl->meshRelation_.properties.resize(0); + } else { + if (triProperties.size() == 0) { + const int numTri = NumTri(); + triProperties.resize(numTri); + int idx = 0; + for (int i = 0; i < numTri; ++i) { + for (const int j : {0, 1, 2}) { + triProperties[i][j] = idx++; + } + } + pImpl->meshRelation_.properties = Vec(numProp * idx, 0); + } else { + pImpl->meshRelation_.properties = Vec(numProp * NumPropVert(), 0); + } + thrust::for_each_n( + thrust::host, countAt(0), NumTri(), + UpdateProperties({pImpl->meshRelation_.properties.data(), numProp, + oldProperties.data(), oldNumProp, + pImpl->vertPos_.data(), triProperties.data(), + pImpl->halfedge_.data(), propFunc})); + } + + pImpl->meshRelation_.numProp = numProp; + pImpl->CreateFaces(); + pImpl->Finish(); + return Manifold(std::make_shared(pImpl)); +} + +/** + * Curvature is the inverse of the radius of curvature, and signed such that + * positive is convex and negative is concave. There are two orthogonal + * principal curvatures at any point on a manifold, with one maximum and the + * other minimum. Gaussian curvature is their product, while mean + * curvature is their sum. This approximates them for every vertex and assigns + * them as vertex properties on the given channels. + * + * @param gaussianIdx The property channel index in which to store the Gaussian + * curvature. An index < 0 will be ignored (stores nothing). The property set + * will be automatically expanded to include the channel index specified. + * + * @param meanIdx The property channel index in which to store the mean + * curvature. An index < 0 will be ignored (stores nothing). The property set + * will be automatically expanded to include the channel index specified. + */ +Manifold Manifold::CalculateCurvature(int gaussianIdx, int meanIdx) const { + auto pImpl = std::make_shared(*GetCsgLeafNode().GetImpl()); + pImpl->CalculateCurvature(gaussianIdx, meanIdx); + return Manifold(std::make_shared(pImpl)); +} + +/** + * Fills in vertex properties for normal vectors, calculated from the mesh + * geometry. Flat faces composed of three or more triangles will remain flat. + * + * @param normalIdx The property channel in which to store the X + * values of the normals. The X, Y, and Z channels will be sequential. The + * property set will be automatically expanded such that NumProp will be at + * least normalIdx + 3. + * + * @param minSharpAngle Any edges with angles greater than this value will + * remain sharp, getting different normal vector properties on each side of the + * edge. By default, no edges are sharp and all normals are shared. With a value + * of zero, the model is faceted and all normals match their triangle normals, + * but in this case it would be better not to calculate normals at all. + */ +Manifold Manifold::CalculateNormals(int normalIdx, float minSharpAngle) const { + auto pImpl = std::make_shared(*GetCsgLeafNode().GetImpl()); + pImpl->SetNormals(normalIdx, minSharpAngle); + return Manifold(std::make_shared(pImpl)); +} + +/** + * Smooths out the Manifold by filling in the halfedgeTangent vectors. 
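(A typical pipeline sketch, assuming the normal channels start at property 0:
+ * CalculateNormals(0), then SmoothByNormals(0), then Refine(n).)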
The + * geometry will remain unchanged until Refine or RefineToLength is called to + * interpolate the surface. This version uses the supplied vertex normal + * properties to define the tangent vectors. Faces of two coplanar triangles + * will be marked as quads, while faces with three or more will be flat. + * + * @param normalIdx The first property channel of the normals. NumProp must be + * at least normalIdx + 3. Any vertex where multiple normals exist and don't + * agree will result in a sharp edge. + */ +Manifold Manifold::SmoothByNormals(int normalIdx) const { + auto pImpl = std::make_shared(*GetCsgLeafNode().GetImpl()); + if (!IsEmpty()) { + pImpl->CreateTangents(normalIdx); + } + return Manifold(std::make_shared(pImpl)); +} + +/** + * Smooths out the Manifold by filling in the halfedgeTangent vectors. The + * geometry will remain unchanged until Refine or RefineToLength is called to + * interpolate the surface. This version uses the geometry of the triangles and + * pseudo-normals to define the tangent vectors. Faces of two coplanar triangles + * will be marked as quads. + * + * @param minSharpAngle degrees, default 60. Any edges with angles greater than + * this value will remain sharp. The rest will be smoothed to G1 continuity, + * with the caveat that flat faces of three or more triangles will always remain + * flat. With a value of zero, the model is faceted, but in this case there is + * no point in smoothing. + * + * @param minSmoothness range: 0 - 1, default 0. The smoothness applied to sharp + * angles. The default gives a hard edge, while values > 0 will give a small + * fillet on these sharp edges. A value of 1 is equivalent to a minSharpAngle of + * 180 - all edges will be smooth. + */ +Manifold Manifold::SmoothOut(float minSharpAngle, float minSmoothness) const { + auto pImpl = std::make_shared(*GetCsgLeafNode().GetImpl()); + if (!IsEmpty()) { + pImpl->CreateTangents(pImpl->SharpenEdges(minSharpAngle, minSmoothness)); + } + return Manifold(std::make_shared(pImpl)); +} + +/** + * Increase the density of the mesh by splitting every edge into n pieces. For + * instance, with n = 2, each triangle will be split into 4 triangles. Quads + * will ignore their interior triangle bisector. These will all be coplanar (and + * will not be immediately collapsed) unless the Mesh/Manifold has + * halfedgeTangents specified (e.g. from the Smooth() constructor), in which + * case the new vertices will be moved to the interpolated surface according to + * their barycentric coordinates. + * + * @param n The number of pieces to split every edge into. Must be > 1. + */ +Manifold Manifold::Refine(int n) const { + auto pImpl = std::make_shared(*GetCsgLeafNode().GetImpl()); + if (n > 1) { + pImpl->Refine([n](glm::vec3 edge) { return n - 1; }); + } + return Manifold(std::make_shared(pImpl)); +} + +/** + * Increase the density of the mesh by splitting each edge into pieces of + * roughly the input length. Interior verts are added to keep the rest of the + * triangulation edges also of roughly the same length. If halfedgeTangents are + * present (e.g. from the Smooth() constructor), the new vertices will be moved + * to the interpolated surface according to their barycentric coordinates. Quads + * will ignore their interior triangle bisector. + * + * @param length The length that edges will be broken down to. 
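+ *
+ * For example (a sketch): RefineToLength(0.1f) splits every edge into roughly
+ * edgeLength / 0.1 pieces, so no edge remains much longer than 0.1 units.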
+ */ +Manifold Manifold::RefineToLength(float length) const { + length = glm::abs(length); + auto pImpl = std::make_shared(*GetCsgLeafNode().GetImpl()); + pImpl->Refine( + [length](glm::vec3 edge) { return glm::length(edge) / length; }); + return Manifold(std::make_shared(pImpl)); +} + +/** + * The central operation of this library: the Boolean combines two manifolds + * into another by calculating their intersections and removing the unused + * portions. + * [ε-valid](https://github.com/elalish/manifold/wiki/Manifold-Library#definition-of-%CE%B5-valid) + * inputs will produce ε-valid output. ε-invalid input may fail + * triangulation. + * + * These operations are optimized to produce nearly-instant results if either + * input is empty or their bounding boxes do not overlap. + * + * @param second The other Manifold. + * @param op The type of operation to perform. + */ +Manifold Manifold::Boolean(const Manifold& second, OpType op) const { + return Manifold(pNode_->Boolean(second.pNode_, op)); +} + +/** + * Perform the given boolean operation on a list of Manifolds. In case of + * Subtract, all Manifolds in the tail are differenced from the head. + */ +Manifold Manifold::BatchBoolean(const std::vector& manifolds, + OpType op) { + if (manifolds.size() == 0) + return Manifold(); + else if (manifolds.size() == 1) + return manifolds[0]; + std::vector> children; + children.reserve(manifolds.size()); + for (const auto& m : manifolds) children.push_back(m.pNode_); + return Manifold(std::make_shared(children, op)); +} + +/** + * Shorthand for Boolean Union. + */ +Manifold Manifold::operator+(const Manifold& Q) const { + return Boolean(Q, OpType::Add); +} + +/** + * Shorthand for Boolean Union assignment. + */ +Manifold& Manifold::operator+=(const Manifold& Q) { + *this = *this + Q; + return *this; +} + +/** + * Shorthand for Boolean Difference. + */ +Manifold Manifold::operator-(const Manifold& Q) const { + return Boolean(Q, OpType::Subtract); +} + +/** + * Shorthand for Boolean Difference assignment. + */ +Manifold& Manifold::operator-=(const Manifold& Q) { + *this = *this - Q; + return *this; +} + +/** + * Shorthand for Boolean Intersection. + */ +Manifold Manifold::operator^(const Manifold& Q) const { + return Boolean(Q, OpType::Intersect); +} + +/** + * Shorthand for Boolean Intersection assignment. + */ +Manifold& Manifold::operator^=(const Manifold& Q) { + *this = *this ^ Q; + return *this; +} + +/** + * Split cuts this manifold in two using the cutter manifold. The first result + * is the intersection, second is the difference. This is more efficient than + * doing them separately. + * + * @param cutter + */ +std::pair Manifold::Split(const Manifold& cutter) const { + auto impl1 = GetCsgLeafNode().GetImpl(); + auto impl2 = cutter.GetCsgLeafNode().GetImpl(); + + Boolean3 boolean(*impl1, *impl2, OpType::Subtract); + auto result1 = std::make_shared( + std::make_unique(boolean.Result(OpType::Intersect))); + auto result2 = std::make_shared( + std::make_unique(boolean.Result(OpType::Subtract))); + return std::make_pair(Manifold(result1), Manifold(result2)); +} + +/** + * Convenient version of Split() for a half-space. + * + * @param normal This vector is normal to the cutting plane and its length does + * not matter. The first result is in the direction of this vector, the second + * result is on the opposite side. + * @param originOffset The distance of the plane from the origin in the + * direction of the normal vector. 
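+ *
+ * A sketch, cutting at the plane z = 0.5 with an upward-pointing normal:
+ *
+ *   auto [above, below] = m.SplitByPlane({0.0f, 0.0f, 1.0f}, 0.5f);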
+ */ +std::pair Manifold::SplitByPlane(glm::vec3 normal, + float originOffset) const { + return Split(Halfspace(BoundingBox(), normal, originOffset)); +} + +/** + * Identical to SplitByPlane(), but calculating and returning only the first + * result. + * + * @param normal This vector is normal to the cutting plane and its length does + * not matter. The result is in the direction of this vector from the plane. + * @param originOffset The distance of the plane from the origin in the + * direction of the normal vector. + */ +Manifold Manifold::TrimByPlane(glm::vec3 normal, float originOffset) const { + return *this ^ Halfspace(BoundingBox(), normal, originOffset); +} + +/** + * Returns the cross section of this object parallel to the X-Y plane at the + * specified Z height, defaulting to zero. Using a height equal to the bottom of + * the bounding box will return the bottom faces, while using a height equal to + * the top of the bounding box will return empty. + */ +CrossSection Manifold::Slice(float height) const { + return GetCsgLeafNode().GetImpl()->Slice(height); +} + +/** + * Returns a cross section representing the projected outline of this object + * onto the X-Y plane. + */ +CrossSection Manifold::Project() const { + return GetCsgLeafNode().GetImpl()->Project(); +} + +ExecutionParams& ManifoldParams() { return manifoldParams; } + +/** + * Compute the convex hull of a set of points. If the given points are fewer + * than 4, or they are all coplanar, an empty Manifold will be returned. + * + * @param pts A vector of 3-dimensional points over which to compute a convex + * hull. + */ +Manifold Manifold::Hull(const std::vector& pts) { + ZoneScoped; + const int numVert = pts.size(); + if (numVert < 4) return Manifold(); + + std::vector> vertices(numVert); + for (int i = 0; i < numVert; i++) { + vertices[i] = {pts[i].x, pts[i].y, pts[i].z}; + } + + quickhull::QuickHull qh; + // bools: correct triangle winding, and use original indices + auto hull = qh.getConvexHull(vertices, false, true); + const auto& triangles = hull.getIndexBuffer(); + const int numTris = triangles.size() / 3; + + Mesh mesh; + mesh.vertPos = pts; + mesh.triVerts.reserve(numTris); + for (int i = 0; i < numTris; i++) { + const int j = i * 3; + mesh.triVerts.push_back({triangles[j], triangles[j + 1], triangles[j + 2]}); + } + return Manifold(mesh); +} + +/** + * Compute the convex hull of this manifold. + */ +Manifold Manifold::Hull() const { return Hull(GetMesh().vertPos); } + +/** + * Compute the convex hull enveloping a set of manifolds. + * + * @param manifolds A vector of manifolds over which to compute a convex hull. + */ +Manifold Manifold::Hull(const std::vector& manifolds) { + return Compose(manifolds).Hull(); +} + +/** + * Returns the minimum gap between two manifolds. Returns a float between + * 0 and searchLength. + * + * @param other The other manifold to compute the minimum gap to. + * @param searchLength The maximum distance to search for a minimum gap. 
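+ *
+ * Sketch: float gap = a.MinGap(b, 1.0f);  // 0 when a and b overlap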
+ */ +float Manifold::MinGap(const Manifold& other, float searchLength) const { + auto intersect = *this ^ other; + auto prop = intersect.GetProperties(); + + if (prop.volume != 0) return 0.0f; + + return GetCsgLeafNode().GetImpl()->MinGap(*other.GetCsgLeafNode().GetImpl(), + searchLength); +} +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/mesh_fixes.h b/thirdparty/manifold/src/manifold/src/mesh_fixes.h new file mode 100644 index 000000000000..d8cb4de892d7 --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/mesh_fixes.h @@ -0,0 +1,57 @@ +#include "impl.h" + +namespace { +using namespace manifold; + +inline int FlipHalfedge(int halfedge) { + const int tri = halfedge / 3; + const int vert = 2 - (halfedge - 3 * tri); + return 3 * tri + vert; +} + +struct TransformNormals { + const glm::mat3 transform; + + glm::vec3 operator()(glm::vec3 normal) { + normal = glm::normalize(transform * normal); + if (isnan(normal.x)) normal = glm::vec3(0.0f); + return normal; + } +}; + +struct TransformTangents { + const glm::mat3 transform; + const bool invert; + VecView oldTangents; + VecView halfedge; + + void operator()(thrust::tuple inOut) { + glm::vec4& tangent = thrust::get<0>(inOut); + int edge = thrust::get<1>(inOut); + if (invert) { + edge = halfedge[FlipHalfedge(edge)].pairedHalfedge; + } + + tangent = glm::vec4(transform * glm::vec3(oldTangents[edge]), + oldTangents[edge].w); + } +}; + +struct FlipTris { + VecView halfedge; + + void operator()(thrust::tuple inOut) { + TriRef& bary = thrust::get<0>(inOut); + const int tri = thrust::get<1>(inOut); + + thrust::swap(halfedge[3 * tri], halfedge[3 * tri + 2]); + + for (const int i : {0, 1, 2}) { + thrust::swap(halfedge[3 * tri + i].startVert, + halfedge[3 * tri + i].endVert); + halfedge[3 * tri + i].pairedHalfedge = + FlipHalfedge(halfedge[3 * tri + i].pairedHalfedge); + } + } +}; +} // namespace diff --git a/thirdparty/manifold/src/manifold/src/properties.cpp b/thirdparty/manifold/src/manifold/src/properties.cpp new file mode 100644 index 000000000000..d85945106b6a --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/properties.cpp @@ -0,0 +1,418 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
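+
+// GetProperties() below sums per-triangle area and signed volume with Kahan
+// (compensated) summation. A minimal standalone sketch of the technique:
+//
+//   float sum = 0.0f, c = 0.0f;  // c accumulates the lost low-order bits
+//   for (const float x : values) {
+//     const float t = sum + x;
+//     c += (sum - t) + x;        // recovers the rounding error of this add
+//     sum = t;
+//   }
+//   sum += c;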
+ +#include + +#include "impl.h" +#include "par.h" + +namespace { +using namespace manifold; + +struct FaceAreaVolume { + VecView halfedges; + VecView vertPos; + const float precision; + + thrust::pair operator()(int face) { + float perimeter = 0; + glm::vec3 edge[3]; + for (int i : {0, 1, 2}) { + const int j = (i + 1) % 3; + edge[i] = vertPos[halfedges[3 * face + j].startVert] - + vertPos[halfedges[3 * face + i].startVert]; + perimeter += glm::length(edge[i]); + } + glm::vec3 crossP = glm::cross(edge[0], edge[1]); + + float area = glm::length(crossP); + float volume = glm::dot(crossP, vertPos[halfedges[3 * face].startVert]); + + return thrust::make_pair(area / 2.0f, volume / 6.0f); + } +}; + +struct PosMin + : public thrust::binary_function { + glm::vec3 operator()(glm::vec3 a, glm::vec3 b) { + if (isnan(a.x)) return b; + if (isnan(b.x)) return a; + return glm::min(a, b); + } +}; + +struct PosMax + : public thrust::binary_function { + glm::vec3 operator()(glm::vec3 a, glm::vec3 b) { + if (isnan(a.x)) return b; + if (isnan(b.x)) return a; + return glm::max(a, b); + } +}; + +struct FiniteVert { + bool operator()(glm::vec3 v) { return glm::all(glm::isfinite(v)); } +}; + +struct MakeMinMax { + glm::ivec2 operator()(glm::ivec3 tri) { + return glm::ivec2(glm::min(tri[0], glm::min(tri[1], tri[2])), + glm::max(tri[0], glm::max(tri[1], tri[2]))); + } +}; + +struct MinMax + : public thrust::binary_function { + glm::ivec2 operator()(glm::ivec2 a, glm::ivec2 b) { + a[0] = glm::min(a[0], b[0]); + a[1] = glm::max(a[1], b[1]); + return a; + } +}; + +struct SumPair : public thrust::binary_function, + thrust::pair, + thrust::pair> { + thrust::pair operator()(thrust::pair a, + thrust::pair b) { + a.first += b.first; + a.second += b.second; + return a; + } +}; + +struct CurvatureAngles { + VecView meanCurvature; + VecView gaussianCurvature; + VecView area; + VecView degree; + VecView halfedge; + VecView vertPos; + VecView triNormal; + + void operator()(int tri) { + glm::vec3 edge[3]; + glm::vec3 edgeLength(0.0); + for (int i : {0, 1, 2}) { + const int startVert = halfedge[3 * tri + i].startVert; + const int endVert = halfedge[3 * tri + i].endVert; + edge[i] = vertPos[endVert] - vertPos[startVert]; + edgeLength[i] = glm::length(edge[i]); + edge[i] /= edgeLength[i]; + const int neighborTri = halfedge[3 * tri + i].pairedHalfedge / 3; + const float dihedral = + 0.25 * edgeLength[i] * + glm::asin(glm::dot(glm::cross(triNormal[tri], triNormal[neighborTri]), + edge[i])); + AtomicAdd(meanCurvature[startVert], dihedral); + AtomicAdd(meanCurvature[endVert], dihedral); + AtomicAdd(degree[startVert], 1.0f); + } + + glm::vec3 phi; + phi[0] = glm::acos(-glm::dot(edge[2], edge[0])); + phi[1] = glm::acos(-glm::dot(edge[0], edge[1])); + phi[2] = glm::pi() - phi[0] - phi[1]; + const float area3 = edgeLength[0] * edgeLength[1] * + glm::length(glm::cross(edge[0], edge[1])) / 6; + + for (int i : {0, 1, 2}) { + const int vert = halfedge[3 * tri + i].startVert; + AtomicAdd(gaussianCurvature[vert], -phi[i]); + AtomicAdd(area[vert], area3); + } + } +}; + +struct NormalizeCurvature { + void operator()(thrust::tuple inOut) { + float& meanCurvature = thrust::get<0>(inOut); + float& gaussianCurvature = thrust::get<1>(inOut); + float area = thrust::get<2>(inOut); + float degree = thrust::get<3>(inOut); + float factor = degree / (6 * area); + meanCurvature *= factor; + gaussianCurvature *= factor; + } +}; + +struct UpdateProperties { + VecView properties; + + VecView oldProperties; + VecView halfedge; + VecView meanCurvature; + VecView 
gaussianCurvature; + const int oldNumProp; + const int numProp; + const int gaussianIdx; + const int meanIdx; + + // FIXME: race condition + void operator()(thrust::tuple inOut) { + glm::ivec3& triProp = thrust::get<0>(inOut); + const int tri = thrust::get<1>(inOut); + + for (const int i : {0, 1, 2}) { + const int vert = halfedge[3 * tri + i].startVert; + if (oldNumProp == 0) { + triProp[i] = vert; + } + const int propVert = triProp[i]; + + for (int p = 0; p < oldNumProp; ++p) { + properties[numProp * propVert + p] = + oldProperties[oldNumProp * propVert + p]; + } + + if (gaussianIdx >= 0) { + properties[numProp * propVert + gaussianIdx] = gaussianCurvature[vert]; + } + if (meanIdx >= 0) { + properties[numProp * propVert + meanIdx] = meanCurvature[vert]; + } + } + } +}; + +struct CheckHalfedges { + VecView halfedges; + VecView vertPos; + + bool operator()(size_t edge) { + const Halfedge halfedge = halfedges[edge]; + if (halfedge.startVert == -1 || halfedge.endVert == -1) return true; + if (halfedge.pairedHalfedge == -1) return false; + + if (!isfinite(vertPos[halfedge.startVert][0])) return false; + if (!isfinite(vertPos[halfedge.endVert][0])) return false; + + const Halfedge paired = halfedges[halfedge.pairedHalfedge]; + bool good = true; + good &= paired.pairedHalfedge == edge; + good &= halfedge.startVert != halfedge.endVert; + good &= halfedge.startVert == paired.endVert; + good &= halfedge.endVert == paired.startVert; + return good; + } +}; + +struct NoDuplicates { + VecView halfedges; + + bool operator()(int edge) { + const Halfedge halfedge = halfedges[edge]; + if (halfedge.startVert == -1 && halfedge.endVert == -1 && + halfedge.pairedHalfedge == -1) + return true; + return halfedge.startVert != halfedges[edge + 1].startVert || + halfedge.endVert != halfedges[edge + 1].endVert; + } +}; + +struct CheckCCW { + VecView halfedges; + VecView vertPos; + VecView triNormal; + const float tol; + + bool operator()(int face) { + if (halfedges[3 * face].pairedHalfedge < 0) return true; + + const glm::mat3x2 projection = GetAxisAlignedProjection(triNormal[face]); + glm::vec2 v[3]; + for (int i : {0, 1, 2}) + v[i] = projection * vertPos[halfedges[3 * face + i].startVert]; + + int ccw = CCW(v[0], v[1], v[2], glm::abs(tol)); + bool check = tol > 0 ? ccw >= 0 : ccw == 0; + +#ifdef MANIFOLD_DEBUG + if (tol > 0 && !check) { + glm::vec2 v1 = v[1] - v[0]; + glm::vec2 v2 = v[2] - v[0]; + float area = v1.x * v2.y - v1.y * v2.x; + float base2 = glm::max(glm::dot(v1, v1), glm::dot(v2, v2)); + float base = glm::sqrt(base2); + glm::vec3 V0 = vertPos[halfedges[3 * face].startVert]; + glm::vec3 V1 = vertPos[halfedges[3 * face + 1].startVert]; + glm::vec3 V2 = vertPos[halfedges[3 * face + 2].startVert]; + glm::vec3 norm = glm::cross(V1 - V0, V2 - V0); + printf( + "Tri %d does not match normal, approx height = %g, base = %g\n" + "tol = %g, area2 = %g, base2*tol2 = %g\n" + "normal = %g, %g, %g\n" + "norm = %g, %g, %g\nverts: %d, %d, %d\n", + face, area / base, base, tol, area * area, base2 * tol * tol, + triNormal[face].x, triNormal[face].y, triNormal[face].z, norm.x, + norm.y, norm.z, halfedges[3 * face].startVert, + halfedges[3 * face + 1].startVert, halfedges[3 * face + 2].startVert); + } +#endif + return check; + } +}; +} // namespace + +namespace manifold { + +/** + * Returns true if this manifold is in fact an oriented even manifold and all of + * the data structures are consistent. 
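+ * Each halfedge must have a valid pair that points back to it, with start
+ * and end vertices swapped between the two and finite vertex positions (see
+ * CheckHalfedges above).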
+
+namespace manifold {
+
+/**
+ * Returns true if this manifold is in fact an oriented even manifold and all
+ * of the data structures are consistent.
+ */
+bool Manifold::Impl::IsManifold() const {
+  if (halfedge_.size() == 0) return true;
+  auto policy = autoPolicy(halfedge_.size());
+
+  return all_of(policy, countAt(0_z), countAt(halfedge_.size()),
+                CheckHalfedges({halfedge_, vertPos_}));
+}
+
+/**
+ * Returns true if this manifold is in fact an oriented 2-manifold and all of
+ * the data structures are consistent.
+ */
+bool Manifold::Impl::Is2Manifold() const {
+  if (halfedge_.size() == 0) return true;
+  auto policy = autoPolicy(halfedge_.size());
+
+  if (!IsManifold()) return false;
+
+  Vec<Halfedge> halfedge(halfedge_);
+  stable_sort(policy, halfedge.begin(), halfedge.end());
+
+  return all_of(policy, countAt(0), countAt(2 * NumEdge() - 1),
+                NoDuplicates({halfedge}));
+}
+
+/**
+ * Returns true if all triangles are CCW relative to their triNormals_.
+ */
+bool Manifold::Impl::MatchesTriNormals() const {
+  if (halfedge_.size() == 0 || faceNormal_.size() != NumTri()) return true;
+  return all_of(autoPolicy(NumTri()), countAt(0), countAt(NumTri()),
+                CheckCCW({halfedge_, vertPos_, faceNormal_, 2 * precision_}));
+}
+
+/**
+ * Returns the number of triangles that are collinear within precision_.
+ */
+int Manifold::Impl::NumDegenerateTris() const {
+  if (halfedge_.size() == 0 || faceNormal_.size() != NumTri()) return 0;
+  return count_if(
+      autoPolicy(NumTri()), countAt(0), countAt(NumTri()),
+      CheckCCW({halfedge_, vertPos_, faceNormal_, -1 * precision_ / 2}));
+}
+
+Properties Manifold::Impl::GetProperties() const {
+  ZoneScoped;
+  if (IsEmpty()) return {0, 0};
+  // Kahan summation
+  float area = 0;
+  float volume = 0;
+  float areaCompensation = 0;
+  float volumeCompensation = 0;
+  for (int i = 0; i < NumTri(); ++i) {
+    auto [area1, volume1] =
+        FaceAreaVolume({halfedge_, vertPos_, precision_})(i);
+    const float t1 = area + area1;
+    const float t2 = volume + volume1;
+    areaCompensation += (area - t1) + area1;
+    volumeCompensation += (volume - t2) + volume1;
+    area = t1;
+    volume = t2;
+  }
+  area += areaCompensation;
+  volume += volumeCompensation;
+
+  return {area, volume};
+}
+
+void Manifold::Impl::CalculateCurvature(int gaussianIdx, int meanIdx) {
+  ZoneScoped;
+  if (IsEmpty()) return;
+  if (gaussianIdx < 0 && meanIdx < 0) return;
+  Vec<float> vertMeanCurvature(NumVert(), 0);
+  Vec<float> vertGaussianCurvature(NumVert(), glm::two_pi<float>());
+  Vec<float> vertArea(NumVert(), 0);
+  Vec<float> degree(NumVert(), 0);
+  auto policy = autoPolicy(NumTri());
+  for_each(policy, countAt(0), countAt(NumTri()),
+           CurvatureAngles({vertMeanCurvature, vertGaussianCurvature, vertArea,
+                            degree, halfedge_, vertPos_, faceNormal_}));
+  for_each_n(policy,
+             zip(vertMeanCurvature.begin(), vertGaussianCurvature.begin(),
+                 vertArea.begin(), degree.begin()),
+             NumVert(), NormalizeCurvature());
+
+  const int oldNumProp = NumProp();
+  const int numProp = glm::max(oldNumProp, glm::max(gaussianIdx, meanIdx) + 1);
+  const Vec<float> oldProperties = meshRelation_.properties;
+  meshRelation_.properties = Vec<float>(numProp * NumPropVert(), 0);
+  meshRelation_.numProp = numProp;
+  if (meshRelation_.triProperties.size() == 0) {
+    meshRelation_.triProperties.resize(NumTri());
+  }
+
+  for_each_n(
+      policy, zip(meshRelation_.triProperties.begin(), countAt(0)), NumTri(),
+      UpdateProperties({meshRelation_.properties, oldProperties, halfedge_,
+                        vertMeanCurvature, vertGaussianCurvature, oldNumProp,
+                        numProp, gaussianIdx, meanIdx}));
+
+  CreateFaces();
+  Finish();
+}
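GetProperties above uses Kahan (compensated) summation: when many small signed triangle areas and volumes are folded into a large running total, the compensation term recovers the low-order bits each float addition would otherwise discard. A minimal sketch of the same pattern with hypothetical values:

#include <cstdio>

// Compensated (Kahan-style) summation, as in GetProperties: the running
// compensation accumulates the rounding error of each addition and is
// folded back in at the end.
float KahanSum(const float* values, int n) {
  float sum = 0.0f;
  float compensation = 0.0f;
  for (int i = 0; i < n; ++i) {
    const float t = sum + values[i];
    // (sum - t) + values[i] is exactly the error of the addition above.
    compensation += (sum - t) + values[i];
    sum = t;
  }
  return sum + compensation;
}

int main() {
  const float vals[] = {1e8f, 1.0f, -1e8f, 1.0f};
  // Plain left-to-right float addition loses one of the 1.0 terms and
  // yields 1; the compensated sum recovers the true total of 2.
  std::printf("%g\n", KahanSum(vals, 4));
  return 0;
}

The same compensation pattern appears twice in GetProperties, once for area and once for volume.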
+
+/**
+ * Calculates the bounding box of the entire manifold, which is stored
+ * internally to short-cut Boolean operations and to serve as the precision
+ * range for Morton code calculation. Ignores NaNs.
+ */
+void Manifold::Impl::CalculateBBox() {
+  auto policy = autoPolicy(NumVert());
+  bBox_.min = reduce<glm::vec3>(
+      policy, vertPos_.begin(), vertPos_.end(),
+      glm::vec3(std::numeric_limits<float>::infinity()), PosMin());
+  bBox_.max = reduce<glm::vec3>(
+      policy, vertPos_.begin(), vertPos_.end(),
+      glm::vec3(-std::numeric_limits<float>::infinity()), PosMax());
+}
+
+/**
+ * Determines if all verts are finite. Checking just the bounding box
+ * dimensions is insufficient as it ignores NaNs.
+ */
+bool Manifold::Impl::IsFinite() const {
+  auto policy = autoPolicy(NumVert());
+  return transform_reduce<bool>(policy, vertPos_.begin(), vertPos_.end(),
+                                FiniteVert(), true,
+                                thrust::logical_and<bool>());
+}
+
+/**
+ * Checks that the input triVerts array has all indices inside bounds of the
+ * vertPos_ array.
+ */
+bool Manifold::Impl::IsIndexInBounds(VecView<const glm::ivec3> triVerts) const {
+  auto policy = autoPolicy(triVerts.size());
+  glm::ivec2 minmax = transform_reduce<glm::ivec2>(
+      policy, triVerts.begin(), triVerts.end(), MakeMinMax(),
+      glm::ivec2(std::numeric_limits<int>::max(),
+                 std::numeric_limits<int>::min()),
+      MinMax());
+
+  return minmax[0] >= 0 && minmax[1] < NumVert();
+}
+}  // namespace manifold
diff --git a/thirdparty/manifold/src/manifold/src/shared.h b/thirdparty/manifold/src/manifold/src/shared.h
new file mode 100644
index 000000000000..ed457dad5e32
--- /dev/null
+++ b/thirdparty/manifold/src/manifold/src/shared.h
@@ -0,0 +1,232 @@
+// Copyright 2021 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "par.h"
+#include "sparse.h"
+#include "utils.h"
+#include "vec.h"
+
+namespace manifold {
+
+/** @addtogroup Private
+ *  @{
+ */
+inline glm::vec3 SafeNormalize(glm::vec3 v) {
+  v = glm::normalize(v);
+  return glm::isfinite(v.x) ? v : glm::vec3(0);
+}
+
+inline float MaxPrecision(float minPrecision, const Box& bBox) {
+  float precision = glm::max(minPrecision, kTolerance * bBox.Scale());
+  return glm::isfinite(precision) ? precision : -1;
+}
+
+inline int NextHalfedge(int current) {
+  ++current;
+  if (current % 3 == 0) current -= 3;
+  return current;
+}
+
+inline glm::mat3 NormalTransform(const glm::mat4x3& transform) {
+  return glm::inverse(glm::transpose(glm::mat3(transform)));
+}
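NormalTransform is the usual inverse-transpose rule: under non-uniform scaling, transforming a normal by the model matrix itself breaks perpendicularity, while the inverse-transpose preserves it. A minimal self-contained check, assuming GLM is available as in the vendored sources and using hypothetical scale values:

#include <cstdio>
#include <glm/glm.hpp>

int main() {
  // Non-uniform scale: x is stretched by 2 (columns of a mat3).
  const glm::mat3 m(glm::vec3(2, 0, 0), glm::vec3(0, 1, 0), glm::vec3(0, 0, 1));
  const glm::mat3 normalXform = glm::inverse(glm::transpose(m));

  // A 45-degree surface direction in the xy-plane and its normal.
  const glm::vec3 tangent = glm::normalize(glm::vec3(1, 1, 0));
  const glm::vec3 normal = glm::normalize(glm::vec3(-1, 1, 0));

  const glm::vec3 n1 = m * normal;            // wrong: no longer perpendicular
  const glm::vec3 n2 = normalXform * normal;  // right: dot stays 0
  std::printf("%g %g\n", glm::dot(m * tangent, n1), glm::dot(m * tangent, n2));
  return 0;
}

The next helper in shared.h projects triangles onto an axis-aligned plane:

+/**
+ * By using the closest axis-aligned projection to the normal instead of a
+ * projection along the normal, we avoid introducing any rounding error.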
+ */ +inline glm::mat3x2 GetAxisAlignedProjection(glm::vec3 normal) { + glm::vec3 absNormal = glm::abs(normal); + float xyzMax; + glm::mat2x3 projection; + if (absNormal.z > absNormal.x && absNormal.z > absNormal.y) { + projection = glm::mat2x3(1.0f, 0.0f, 0.0f, // + 0.0f, 1.0f, 0.0f); + xyzMax = normal.z; + } else if (absNormal.y > absNormal.x) { + projection = glm::mat2x3(0.0f, 0.0f, 1.0f, // + 1.0f, 0.0f, 0.0f); + xyzMax = normal.y; + } else { + projection = glm::mat2x3(0.0f, 1.0f, 0.0f, // + 0.0f, 0.0f, 1.0f); + xyzMax = normal.x; + } + if (xyzMax < 0) projection[0] *= -1.0f; + return glm::transpose(projection); +} + +inline glm::vec3 GetBarycentric(const glm::vec3& v, const glm::mat3& triPos, + float precision) { + const glm::mat3 edges(triPos[2] - triPos[1], triPos[0] - triPos[2], + triPos[1] - triPos[0]); + const glm::vec3 d2(glm::dot(edges[0], edges[0]), glm::dot(edges[1], edges[1]), + glm::dot(edges[2], edges[2])); + const int longSide = d2[0] > d2[1] && d2[0] > d2[2] ? 0 + : d2[1] > d2[2] ? 1 + : 2; + const glm::vec3 crossP = glm::cross(edges[0], edges[1]); + const float area2 = glm::dot(crossP, crossP); + const float tol2 = precision * precision; + + glm::vec3 uvw(0); + for (const int i : {0, 1, 2}) { + const glm::vec3 dv = v - triPos[i]; + if (glm::dot(dv, dv) < tol2) { + // Return exactly equal if within tolerance of vert. + uvw[i] = 1; + return uvw; + } + } + + if (d2[longSide] < tol2) { // point + return glm::vec3(1, 0, 0); + } else if (area2 > d2[longSide] * tol2) { // triangle + for (const int i : {0, 1, 2}) { + const int j = Next3(i); + const glm::vec3 crossPv = glm::cross(edges[i], v - triPos[j]); + const float area2v = glm::dot(crossPv, crossPv); + // Return exactly equal if within tolerance of edge. + uvw[i] = area2v < d2[i] * tol2 ? 0 : glm::dot(crossPv, crossP); + } + uvw /= (uvw[0] + uvw[1] + uvw[2]); + return uvw; + } else { // line + const int nextV = Next3(longSide); + const float alpha = + glm::dot(v - triPos[nextV], edges[longSide]) / d2[longSide]; + uvw[longSide] = 0; + uvw[nextV] = 1 - alpha; + const int lastV = Next3(nextV); + uvw[lastV] = alpha; + return uvw; + } +} + +/** + * The fundamental component of the halfedge data structure used for storing and + * operating on the Manifold. + */ +struct Halfedge { + int startVert, endVert; + int pairedHalfedge; + int face; + bool IsForward() const { return startVert < endVert; } + bool operator<(const Halfedge& other) const { + return startVert == other.startVert ? endVert < other.endVert + : startVert < other.startVert; + } +}; + +struct Barycentric { + int tri; + glm::vec4 uvw; +}; + +struct TriRef { + /// The unique ID of the mesh instance of this triangle. If .meshID and .tri + /// match for two triangles, then they are coplanar and came from the same + /// face. + int meshID; + /// The OriginalID of the mesh this triangle came from. This ID is ideal for + /// reapplying properties like UV coordinates to the output mesh. + int originalID; + /// The triangle index of the original triangle this was part of: + /// Mesh.triVerts[tri]. + int tri; + + bool SameFace(const TriRef& other) const { + return meshID == other.meshID && tri == other.tri; + } +}; + +/** + * This is a temporary edge structure which only stores edges forward and + * references the halfedge it was created from. 
+ */
+struct TmpEdge {
+  int first, second, halfedgeIdx;
+
+  TmpEdge() {}
+  TmpEdge(int start, int end, int idx) {
+    first = glm::min(start, end);
+    second = glm::max(start, end);
+    halfedgeIdx = idx;
+  }
+
+  bool operator<(const TmpEdge& other) const {
+    return first == other.first ? second < other.second : first < other.first;
+  }
+};
+/** @} */
+
+struct Halfedge2Tmp {
+  void operator()(thrust::tuple<TmpEdge&, const Halfedge&, int> inout) {
+    const Halfedge& halfedge = thrust::get<1>(inout);
+    int idx = thrust::get<2>(inout);
+    if (!halfedge.IsForward()) idx = -1;
+
+    thrust::get<0>(inout) = TmpEdge(halfedge.startVert, halfedge.endVert, idx);
+  }
+};
+
+struct TmpInvalid {
+  bool operator()(const TmpEdge& edge) { return edge.halfedgeIdx < 0; }
+};
+
+Vec<TmpEdge> inline CreateTmpEdges(const Vec<Halfedge>& halfedge) {
+  Vec<TmpEdge> edges(halfedge.size());
+  for_each_n(autoPolicy(edges.size()),
+             zip(edges.begin(), halfedge.begin(), countAt(0)), edges.size(),
+             Halfedge2Tmp());
+  int numEdge =
+      remove_if(
+          autoPolicy(edges.size()), edges.begin(), edges.end(), TmpInvalid()) -
+      edges.begin();
+  ASSERT(numEdge == halfedge.size() / 2, topologyErr, "Not oriented!");
+  edges.resize(numEdge);
+  return edges;
+}
+
+template <const bool inverted>
+struct ReindexEdge {
+  VecView<const TmpEdge> edges;
+  SparseIndices& indices;
+
+  void operator()(size_t i) {
+    int& edge = indices.Get(i, inverted);
+    edge = edges[edge].halfedgeIdx;
+  }
+};
+
+#ifdef MANIFOLD_DEBUG
+inline std::ostream& operator<<(std::ostream& stream, const Halfedge& edge) {
+  return stream << "startVert = " << edge.startVert
+                << ", endVert = " << edge.endVert
+                << ", pairedHalfedge = " << edge.pairedHalfedge
+                << ", face = " << edge.face;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const Barycentric& bary) {
+  return stream << "tri = " << bary.tri << ", uvw = " << bary.uvw;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const TriRef& ref) {
+  return stream << "meshID: " << ref.meshID
+                << ", originalID: " << ref.originalID << ", tri: " << ref.tri;
+}
+#endif
+}  // namespace manifold
diff --git a/thirdparty/manifold/src/manifold/src/smoothing.cpp b/thirdparty/manifold/src/manifold/src/smoothing.cpp
new file mode 100644
index 000000000000..294f2cdf5a74
--- /dev/null
+++ b/thirdparty/manifold/src/manifold/src/smoothing.cpp
@@ -0,0 +1,847 @@
+// Copyright 2021 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <map>
+
+#include "impl.h"
+#include "par.h"
+
+namespace {
+using namespace manifold;
+
+glm::vec3 OrthogonalTo(glm::vec3 in, glm::vec3 ref) {
+  in -= glm::dot(in, ref) * ref;
+  return in;
+}
+
+// Calculate a tangent vector in the form of a weighted cubic Bezier taking as
+// input the desired tangent direction (length doesn't matter) and the edge
+// vector to the neighboring vertex. In a symmetric situation where the
+// tangents at each end are mirror images of each other, this will result in a
+// circular arc.
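+// (Why the weight works: for a circular arc, the angle between the chord and
+// an end tangent is half the arc angle, so |dot(dir, chord dir)| =
+// cos(arc angle / 2) is exactly the middle weight of the rational quadratic
+// Bezier for that arc, with the control point where the end tangents meet.)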
+glm::vec4 CircularTangent(const glm::vec3& tangent, const glm::vec3& edgeVec) { + const glm::vec3 dir = SafeNormalize(tangent); + + float weight = glm::abs(glm::dot(dir, SafeNormalize(edgeVec))); + if (weight == 0) { + weight = 1; + } + // Quadratic weighted bezier for circular interpolation + const glm::vec4 bz2 = + weight * glm::vec4(dir * glm::length(edgeVec) / (2 * weight), 1); + // Equivalent cubic weighted bezier + const glm::vec4 bz3 = glm::mix(glm::vec4(0, 0, 0, 1), bz2, 2 / 3.0f); + // Convert from homogeneous form to geometric form + return glm::vec4(glm::vec3(bz3) / bz3.w, bz3.w); +} + +struct SmoothBezier { + const Manifold::Impl* impl; + VecView vertNormal; + + void operator()(thrust::tuple inOut) { + glm::vec4& tangent = thrust::get<0>(inOut); + const Halfedge edge = thrust::get<1>(inOut); + const int edgeIdx = thrust::get<2>(inOut); + + if (impl->IsInsideQuad(edgeIdx)) { + tangent = glm::vec4(0, 0, 0, -1); + return; + } + + const glm::vec3 edgeVec = + impl->vertPos_[edge.endVert] - impl->vertPos_[edge.startVert]; + const glm::vec3 edgeNormal = + impl->faceNormal_[edge.face] + + impl->faceNormal_[impl->halfedge_[edge.pairedHalfedge].face]; + glm::vec3 dir = + glm::cross(glm::cross(edgeNormal, edgeVec), vertNormal[edge.startVert]); + tangent = CircularTangent(dir, edgeVec); + } +}; + +struct InterpTri { + const Manifold::Impl* impl; + + glm::vec4 Homogeneous(glm::vec4 v) const { + v.x *= v.w; + v.y *= v.w; + v.z *= v.w; + return v; + } + + glm::vec4 Homogeneous(glm::vec3 v) const { return glm::vec4(v, 1.0f); } + + glm::vec3 HNormalize(glm::vec4 v) const { + return v.w == 0 ? v : (glm::vec3(v) / v.w); + } + + glm::vec4 Scale(glm::vec4 v, float scale) const { + return glm::vec4(scale * glm::vec3(v), v.w); + } + + glm::vec4 Bezier(glm::vec3 point, glm::vec4 tangent) const { + return Homogeneous(glm::vec4(point, 0) + tangent); + } + + glm::mat2x4 CubicBezier2Linear(glm::vec4 p0, glm::vec4 p1, glm::vec4 p2, + glm::vec4 p3, float x) const { + glm::mat2x4 out; + glm::vec4 p12 = glm::mix(p1, p2, x); + out[0] = glm::mix(glm::mix(p0, p1, x), p12, x); + out[1] = glm::mix(p12, glm::mix(p2, p3, x), x); + return out; + } + + glm::vec3 BezierPoint(glm::mat2x4 points, float x) const { + return HNormalize(glm::mix(points[0], points[1], x)); + } + + glm::vec3 BezierTangent(glm::mat2x4 points) const { + return SafeNormalize(HNormalize(points[1]) - HNormalize(points[0])); + } + + glm::vec3 RotateFromTo(glm::vec3 v, glm::quat start, glm::quat end) const { + return end * glm::conjugate(start) * v; + } + + glm::mat2x4 Bezier2Bezier(const glm::mat2x3& corners, + const glm::mat2x4& tangentsX, + const glm::mat2x4& tangentsY, float x, + const glm::bvec2& pointedEnds, + const glm::vec3& anchor) const { + const glm::mat2x4 bez = CubicBezier2Linear( + Homogeneous(corners[0]), Bezier(corners[0], tangentsX[0]), + Bezier(corners[1], tangentsX[1]), Homogeneous(corners[1]), x); + const glm::vec3 end = BezierPoint(bez, x); + const glm::vec3 tangent = BezierTangent(bez); + + const glm::mat2x3 nTangentsX(SafeNormalize(glm::vec3(tangentsX[0])), + -SafeNormalize(glm::vec3(tangentsX[1]))); + const glm::mat2x3 biTangents = { + SafeNormalize(OrthogonalTo( + glm::vec3(tangentsY[0]) + kTolerance * (anchor - corners[0]), + nTangentsX[0])), + SafeNormalize(OrthogonalTo( + glm::vec3(tangentsY[1]) + kTolerance * (anchor - corners[1]), + nTangentsX[1]))}; + + const glm::quat q0 = + glm::quat_cast(glm::mat3(nTangentsX[0], biTangents[0], + glm::cross(nTangentsX[0], biTangents[0]))); + const glm::quat q1 = + 
glm::quat_cast(glm::mat3(nTangentsX[1], biTangents[1], + glm::cross(nTangentsX[1], biTangents[1]))); + const glm::quat qTmp = glm::slerp(q0, q1, x); + const glm::quat q = + glm::rotation(qTmp * glm::vec3(1, 0, 0), tangent) * qTmp; + + const glm::vec3 end0 = pointedEnds[0] + ? glm::vec3(0) + : RotateFromTo(glm::vec3(tangentsY[0]), q0, q); + const glm::vec3 end1 = pointedEnds[1] + ? glm::vec3(0) + : RotateFromTo(glm::vec3(tangentsY[1]), q1, q); + const glm::vec3 delta = glm::mix(end0, end1, x); + const float deltaW = glm::mix(tangentsY[0].w, tangentsY[1].w, x); + + return {Homogeneous(end), glm::vec4(delta, deltaW)}; + } + + glm::vec3 Bezier2D(const glm::mat4x3& corners, const glm::mat4& tangentsX, + const glm::mat4& tangentsY, float x, float y, + const glm::vec3& centroid, bool isTriangle) const { + glm::mat2x4 bez0 = Bezier2Bezier( + {corners[0], corners[1]}, {tangentsX[0], tangentsX[1]}, + {tangentsY[0], tangentsY[1]}, x, {isTriangle, false}, centroid); + glm::mat2x4 bez1 = Bezier2Bezier( + {corners[2], corners[3]}, {tangentsX[2], tangentsX[3]}, + {tangentsY[2], tangentsY[3]}, 1 - x, {false, isTriangle}, centroid); + + const float flatLen = + isTriangle ? x * glm::length(corners[1] - corners[2]) + : glm::length(glm::mix(corners[0], corners[1], x) - + glm::mix(corners[3], corners[2], x)); + const float scale = glm::length(glm::vec3(bez0[0] - bez1[0])) / flatLen; + + const glm::mat2x4 bez = CubicBezier2Linear( + bez0[0], Bezier(glm::vec3(bez0[0]), Scale(bez0[1], scale)), + Bezier(glm::vec3(bez1[0]), Scale(bez1[1], scale)), bez1[0], y); + return BezierPoint(bez, y); + } + + void operator()(thrust::tuple inOut) { + glm::vec3& pos = thrust::get<0>(inOut); + const int tri = thrust::get<1>(inOut).tri; + const glm::vec4 uvw = thrust::get<1>(inOut).uvw; + + const glm::ivec4 halfedges = impl->GetHalfedges(tri); + const glm::mat4x3 corners = { + impl->vertPos_[impl->halfedge_[halfedges[0]].startVert], + impl->vertPos_[impl->halfedge_[halfedges[1]].startVert], + impl->vertPos_[impl->halfedge_[halfedges[2]].startVert], + halfedges[3] < 0 + ? 
glm::vec3(0) + : impl->vertPos_[impl->halfedge_[halfedges[3]].startVert]}; + + for (const int i : {0, 1, 2, 3}) { + if (uvw[i] == 1) { + pos = corners[i]; + return; + } + } + + glm::vec4 posH(0); + + if (halfedges[3] < 0) { // tri + const glm::mat3x4 tangentR = {impl->halfedgeTangent_[halfedges[0]], + impl->halfedgeTangent_[halfedges[1]], + impl->halfedgeTangent_[halfedges[2]]}; + const glm::mat3x4 tangentL = { + impl->halfedgeTangent_[impl->halfedge_[halfedges[2]].pairedHalfedge], + impl->halfedgeTangent_[impl->halfedge_[halfedges[0]].pairedHalfedge], + impl->halfedgeTangent_[impl->halfedge_[halfedges[1]].pairedHalfedge]}; + const glm::vec3 centroid = glm::mat3(corners) * glm::vec3(1.0f / 3); + + for (const int i : {0, 1, 2}) { + const int j = Next3(i); + const int k = Prev3(i); + const float x = uvw[k] / (1 - uvw[i]); + const glm::vec3 p = + Bezier2D({corners[i], corners[j], corners[k], corners[i]}, + {tangentR[i], tangentL[j], tangentR[k], tangentL[i]}, + {tangentL[i], tangentR[j], tangentL[k], tangentR[i]}, + 1 - uvw[i], x, centroid, true); + posH += Homogeneous(glm::vec4(p, uvw[i])); + } + } else { // quad + const glm::mat4 tangentsX = { + impl->halfedgeTangent_[halfedges[0]], + impl->halfedgeTangent_[impl->halfedge_[halfedges[0]].pairedHalfedge], + impl->halfedgeTangent_[halfedges[2]], + impl->halfedgeTangent_[impl->halfedge_[halfedges[2]].pairedHalfedge]}; + const glm::mat4 tangentsY = { + impl->halfedgeTangent_[impl->halfedge_[halfedges[3]].pairedHalfedge], + impl->halfedgeTangent_[halfedges[1]], + impl->halfedgeTangent_[impl->halfedge_[halfedges[1]].pairedHalfedge], + impl->halfedgeTangent_[halfedges[3]]}; + const glm::vec3 centroid = corners * glm::vec4(0.25); + const float x = uvw[1] + uvw[2]; + const float y = uvw[2] + uvw[3]; + const glm::vec3 pX = + Bezier2D(corners, tangentsX, tangentsY, x, y, centroid, false); + const glm::vec3 pY = + Bezier2D({corners[1], corners[2], corners[3], corners[0]}, + {tangentsY[1], tangentsY[2], tangentsY[3], tangentsY[0]}, + {tangentsX[1], tangentsX[2], tangentsX[3], tangentsX[0]}, y, + 1 - x, centroid, false); + posH += Homogeneous(glm::vec4(pX, x * (1 - x))); + posH += Homogeneous(glm::vec4(pY, y * (1 - y))); + } + pos = HNormalize(posH); + } +}; +} // namespace + +namespace manifold { + +/** + * Get the property normal associated with the startVert of this halfedge, where + * normalIdx shows the beginning of where normals are stored in the properties. + */ +glm::vec3 Manifold::Impl::GetNormal(int halfedge, int normalIdx) const { + const int tri = halfedge / 3; + const int j = halfedge % 3; + const int prop = meshRelation_.triProperties[tri][j]; + glm::vec3 normal; + for (const int i : {0, 1, 2}) { + normal[i] = + meshRelation_.properties[prop * meshRelation_.numProp + normalIdx + i]; + } + return normal; +} + +/** + * Returns true if this halfedge should be marked as the interior of a quad, as + * defined by its two triangles referring to the same face, and those triangles + * having no further face neighbors beyond. 
+ */ +bool Manifold::Impl::IsInsideQuad(int halfedge) const { + if (halfedgeTangent_.size() > 0) { + return halfedgeTangent_[halfedge].w < 0; + } + const int tri = halfedge_[halfedge].face; + const TriRef ref = meshRelation_.triRef[tri]; + const int pair = halfedge_[halfedge].pairedHalfedge; + const int pairTri = halfedge_[pair].face; + const TriRef pairRef = meshRelation_.triRef[pairTri]; + if (!ref.SameFace(pairRef)) return false; + + auto SameFace = [this](int halfedge, const TriRef& ref) { + return ref.SameFace( + meshRelation_.triRef[halfedge_[halfedge].pairedHalfedge / 3]); + }; + + int neighbor = NextHalfedge(halfedge); + if (SameFace(neighbor, ref)) return false; + neighbor = NextHalfedge(neighbor); + if (SameFace(neighbor, ref)) return false; + neighbor = NextHalfedge(pair); + if (SameFace(neighbor, pairRef)) return false; + neighbor = NextHalfedge(neighbor); + if (SameFace(neighbor, pairRef)) return false; + return true; +} + +/** + * Returns true if this halfedge is an interior of a quad, as defined by its + * halfedge tangent having negative weight. + */ +bool Manifold::Impl::IsMarkedInsideQuad(int halfedge) const { + return halfedgeTangent_.size() > 0 && halfedgeTangent_[halfedge].w < 0; +} + +// sharpenedEdges are referenced to the input Mesh, but the triangles have +// been sorted in creating the Manifold, so the indices are converted using +// meshRelation_. +std::vector Manifold::Impl::UpdateSharpenedEdges( + const std::vector& sharpenedEdges) const { + std::unordered_map oldHalfedge2New; + for (int tri = 0; tri < NumTri(); ++tri) { + int oldTri = meshRelation_.triRef[tri].tri; + for (int i : {0, 1, 2}) oldHalfedge2New[3 * oldTri + i] = 3 * tri + i; + } + std::vector newSharp = sharpenedEdges; + for (Smoothness& edge : newSharp) { + edge.halfedge = oldHalfedge2New[edge.halfedge]; + } + return newSharp; +} + +// Find faces containing at least 3 triangles - these will not have +// interpolated normals - all their vert normals must match their face normal. +Vec Manifold::Impl::FlatFaces() const { + const int numTri = NumTri(); + Vec triIsFlatFace(numTri, false); + for_each_n(autoPolicy(numTri), countAt(0), numTri, + [this, &triIsFlatFace](const int tri) { + const TriRef& ref = meshRelation_.triRef[tri]; + int faceNeighbors = 0; + glm::ivec3 faceTris = {-1, -1, -1}; + for (const int j : {0, 1, 2}) { + const int neighborTri = + halfedge_[halfedge_[3 * tri + j].pairedHalfedge].face; + const TriRef& jRef = meshRelation_.triRef[neighborTri]; + if (jRef.SameFace(ref)) { + ++faceNeighbors; + faceTris[j] = neighborTri; + } + } + if (faceNeighbors > 1) { + triIsFlatFace[tri] = true; + for (const int j : {0, 1, 2}) { + if (faceTris[j] >= 0) { + triIsFlatFace[faceTris[j]] = true; + } + } + } + }); + return triIsFlatFace; +} + +// Returns a vector of length numVert that has a tri that is part of a +// neighboring flat face if there is only one flat face. If there are none it +// gets -1, and if there are more than one it gets -2. +Vec Manifold::Impl::VertFlatFace(const Vec& flatFaces) const { + Vec vertFlatFace(NumVert(), -1); + Vec vertRef(NumVert(), {-1, -1, -1}); + for (int tri = 0; tri < NumTri(); ++tri) { + if (flatFaces[tri]) { + for (const int j : {0, 1, 2}) { + const int vert = halfedge_[3 * tri + j].startVert; + if (vertRef[vert].SameFace(meshRelation_.triRef[tri])) continue; + vertRef[vert] = meshRelation_.triRef[tri]; + vertFlatFace[vert] = vertFlatFace[vert] == -1 ? 
tri : -2; + } + } + } + return vertFlatFace; +} + +std::vector Manifold::Impl::SharpenEdges( + float minSharpAngle, float minSmoothness) const { + std::vector sharpenedEdges; + const float minRadians = glm::radians(minSharpAngle); + for (int e = 0; e < halfedge_.size(); ++e) { + if (!halfedge_[e].IsForward()) continue; + const int pair = halfedge_[e].pairedHalfedge; + const float dihedral = + glm::acos(glm::dot(faceNormal_[e / 3], faceNormal_[pair / 3])); + if (dihedral > minRadians) { + sharpenedEdges.push_back({e, minSmoothness}); + sharpenedEdges.push_back({pair, minSmoothness}); + } + } + return sharpenedEdges; +} + +/** + * Sharpen tangents that intersect an edge to sharpen that edge. The weight is + * unchanged, as this has a squared effect on radius of curvature, except + * in the case of zero radius, which is marked with weight = 0. + */ +void Manifold::Impl::SharpenTangent(int halfedge, float smoothness) { + halfedgeTangent_[halfedge] = + glm::vec4(smoothness * glm::vec3(halfedgeTangent_[halfedge]), + smoothness == 0 ? 0 : halfedgeTangent_[halfedge].w); +} + +/** + * Instead of calculating the internal shared normals like CalculateNormals + * does, this method fills in vertex properties, unshared across edges that + * are bent more than minSharpAngle. + */ +void Manifold::Impl::SetNormals(int normalIdx, float minSharpAngle) { + if (IsEmpty()) return; + if (normalIdx < 0) return; + + const int oldNumProp = NumProp(); + const int numTri = NumTri(); + + Vec triIsFlatFace = FlatFaces(); + Vec vertFlatFace = VertFlatFace(triIsFlatFace); + Vec vertNumSharp(NumVert(), 0); + for (int e = 0; e < halfedge_.size(); ++e) { + if (!halfedge_[e].IsForward()) continue; + const int pair = halfedge_[e].pairedHalfedge; + const int tri1 = e / 3; + const int tri2 = pair / 3; + const float dihedral = + glm::degrees(glm::acos(glm::dot(faceNormal_[tri1], faceNormal_[tri2]))); + if (dihedral > minSharpAngle) { + ++vertNumSharp[halfedge_[e].startVert]; + ++vertNumSharp[halfedge_[e].endVert]; + } else { + const bool faceSplit = + triIsFlatFace[tri1] != triIsFlatFace[tri2] || + (triIsFlatFace[tri1] && triIsFlatFace[tri2] && + !meshRelation_.triRef[tri1].SameFace(meshRelation_.triRef[tri2])); + if (vertFlatFace[halfedge_[e].startVert] == -2 && faceSplit) { + ++vertNumSharp[halfedge_[e].startVert]; + } + if (vertFlatFace[halfedge_[e].endVert] == -2 && faceSplit) { + ++vertNumSharp[halfedge_[e].endVert]; + } + } + } + + const int numProp = glm::max(oldNumProp, normalIdx + 3); + Vec oldProperties(numProp * NumPropVert(), 0); + meshRelation_.properties.swap(oldProperties); + meshRelation_.numProp = numProp; + if (meshRelation_.triProperties.size() == 0) { + meshRelation_.triProperties.resize(numTri); + for_each_n(autoPolicy(numTri), countAt(0), numTri, [this](int tri) { + for (const int j : {0, 1, 2}) + meshRelation_.triProperties[tri][j] = halfedge_[3 * tri + j].startVert; + }); + } + Vec oldTriProp(numTri, {-1, -1, -1}); + meshRelation_.triProperties.swap(oldTriProp); + + for (int tri = 0; tri < numTri; ++tri) { + for (const int i : {0, 1, 2}) { + if (meshRelation_.triProperties[tri][i] >= 0) continue; + int startEdge = 3 * tri + i; + const int vert = halfedge_[startEdge].startVert; + + if (vertNumSharp[vert] < 2) { + const glm::vec3 normal = vertFlatFace[vert] >= 0 + ? 
faceNormal_[vertFlatFace[vert]] + : vertNormal_[vert]; + int lastProp = -1; + ForVert(startEdge, [&](int current) { + const int thisTri = current / 3; + const int j = current - 3 * thisTri; + const int prop = oldTriProp[thisTri][j]; + meshRelation_.triProperties[thisTri][j] = prop; + if (prop == lastProp) return; + lastProp = prop; + auto start = oldProperties.begin() + prop * oldNumProp; + std::copy(start, start + oldNumProp, + meshRelation_.properties.begin() + prop * numProp); + for (const int i : {0, 1, 2}) + meshRelation_.properties[prop * numProp + normalIdx + i] = + normal[i]; + }); + } else { + const glm::vec3 centerPos = vertPos_[vert]; + // Length degree + std::vector group; + // Length number of normals + std::vector normals; + int current = startEdge; + int prevFace = halfedge_[current].face; + + do { + int next = NextHalfedge(halfedge_[current].pairedHalfedge); + const int face = halfedge_[next].face; + + const float dihedral = glm::degrees( + glm::acos(glm::dot(faceNormal_[face], faceNormal_[prevFace]))); + if (dihedral > minSharpAngle || + triIsFlatFace[face] != triIsFlatFace[prevFace] || + (triIsFlatFace[face] && triIsFlatFace[prevFace] && + !meshRelation_.triRef[face].SameFace( + meshRelation_.triRef[prevFace]))) { + break; + } + current = next; + prevFace = face; + } while (current != startEdge); + + const int endEdge = current; + + struct FaceEdge { + int face; + glm::vec3 edgeVec; + }; + + ForVert( + endEdge, + [this, centerPos](int current) { + return FaceEdge( + {halfedge_[current].face, + glm::normalize(vertPos_[halfedge_[current].endVert] - + centerPos)}); + }, + [this, &triIsFlatFace, &normals, &group, minSharpAngle]( + int current, const FaceEdge& here, const FaceEdge& next) { + const float dihedral = glm::degrees(glm::acos( + glm::dot(faceNormal_[here.face], faceNormal_[next.face]))); + if (dihedral > minSharpAngle || + triIsFlatFace[here.face] != triIsFlatFace[next.face] || + (triIsFlatFace[here.face] && triIsFlatFace[next.face] && + !meshRelation_.triRef[here.face].SameFace( + meshRelation_.triRef[next.face]))) { + normals.push_back(glm::vec3(0)); + } + group.push_back(normals.size() - 1); + float dot = glm::dot(here.edgeVec, next.edgeVec); + const float phi = + dot >= 1 ? kTolerance + : (dot <= -1 ? 
glm::pi() : glm::acos(dot)); + normals.back() += faceNormal_[next.face] * phi; + }); + + for (auto& normal : normals) { + normal = glm::normalize(normal); + } + + int lastGroup = 0; + int lastProp = -1; + int newProp = -1; + int idx = 0; + ForVert(endEdge, [&](int current1) { + const int thisTri = current1 / 3; + const int j = current1 - 3 * thisTri; + const int prop = oldTriProp[thisTri][j]; + auto start = oldProperties.begin() + prop * oldNumProp; + + if (group[idx] != lastGroup && group[idx] != 0 && prop == lastProp) { + lastGroup = group[idx]; + newProp = NumPropVert(); + meshRelation_.properties.resize(meshRelation_.properties.size() + + numProp); + std::copy(start, start + oldNumProp, + meshRelation_.properties.begin() + newProp * numProp); + for (const int i : {0, 1, 2}) { + meshRelation_.properties[newProp * numProp + normalIdx + i] = + normals[group[idx]][i]; + } + } else if (prop != lastProp) { + lastProp = prop; + newProp = prop; + std::copy(start, start + oldNumProp, + meshRelation_.properties.begin() + prop * numProp); + for (const int i : {0, 1, 2}) + meshRelation_.properties[prop * numProp + normalIdx + i] = + normals[group[idx]][i]; + } + + meshRelation_.triProperties[thisTri][j] = newProp; + ++idx; + }); + } + } + } +} + +/** + * Tangents get flattened to create sharp edges by setting their weight to zero. + * This is the natural limit of reducing the weight to increase the sharpness + * smoothly. This limit gives a decent shape, but it causes the parameterization + * to be stretched and compresses it near the edges, which is good for resolving + * tight curvature, but bad for property interpolation. This function fixes the + * parameter stretch at the limit for sharp edges, since there is no curvature + * to resolve. Note this also changes the overall shape - making it more evenly + * curved. + */ +void Manifold::Impl::LinearizeFlatTangents() { + const int n = halfedgeTangent_.size(); + for_each_n( + autoPolicy(n), zip(halfedgeTangent_.begin(), countAt(0)), n, + [this](thrust::tuple inOut) { + glm::vec4& tangent = thrust::get<0>(inOut); + const int halfedge = thrust::get<1>(inOut); + glm::vec4& otherTangent = + halfedgeTangent_[halfedge_[halfedge].pairedHalfedge]; + + const glm::bvec2 flat(tangent.w == 0, otherTangent.w == 0); + if (!halfedge_[halfedge].IsForward() || (!flat[0] && !flat[1])) { + return; + } + + const glm::vec3 edgeVec = vertPos_[halfedge_[halfedge].endVert] - + vertPos_[halfedge_[halfedge].startVert]; + + if (flat[0] && flat[1]) { + tangent = glm::vec4(edgeVec / 3.0f, 1); + otherTangent = glm::vec4(-edgeVec / 3.0f, 1); + } else if (flat[0]) { + tangent = glm::vec4((edgeVec + glm::vec3(otherTangent)) / 2.0f, 1); + } else { + otherTangent = glm::vec4((-edgeVec + glm::vec3(tangent)) / 2.0f, 1); + } + }); +} + +/** + * Calculates halfedgeTangent_, allowing the manifold to be refined and + * smoothed. The tangents form weighted cubic Beziers along each edge. This + * function creates circular arcs where possible (minimizing maximum curvature), + * constrained to the indicated property normals. Across edges that form + * discontinuities in the normals, the tangent vectors are zero-length, allowing + * the shape to form a sharp corner with minimal oscillation. 
+ */ +void Manifold::Impl::CreateTangents(int normalIdx) { + ZoneScoped; + const int numVert = NumVert(); + const int numHalfedge = halfedge_.size(); + halfedgeTangent_.resize(0); + Vec tangent(numHalfedge); + + Vec vertNormal(numVert); + Vec vertSharpHalfedge(numVert, glm::ivec2(-1)); + for (int e = 0; e < numHalfedge; ++e) { + const int vert = halfedge_[e].startVert; + auto& sharpHalfedge = vertSharpHalfedge[vert]; + if (sharpHalfedge[0] >= 0 && sharpHalfedge[1] >= 0) continue; + + int idx = 0; + // Only used when there is only one. + glm::vec3& lastNormal = vertNormal[vert]; + + ForVert( + e, + [normalIdx, this](int halfedge) { + return GetNormal(halfedge, normalIdx); + }, + [&sharpHalfedge, &idx, &lastNormal](int halfedge, + const glm::vec3& normal, + const glm::vec3& nextNormal) { + const glm::vec3 diff = nextNormal - normal; + if (glm::dot(diff, diff) > kTolerance * kTolerance) { + if (idx < 2) { + sharpHalfedge[idx++] = halfedge; + } else { + sharpHalfedge[0] = -1; // marks more than 2 sharp edges + } + } + lastNormal = normal; + }); + } + + for_each_n(autoPolicy(numHalfedge), + zip(tangent.begin(), halfedge_.cbegin(), countAt(0)), numHalfedge, + SmoothBezier({this, vertNormal})); + + halfedgeTangent_.swap(tangent); + + for (int vert = 0; vert < numVert; ++vert) { + const int first = vertSharpHalfedge[vert][0]; + const int second = vertSharpHalfedge[vert][1]; + if (second == -1) continue; + if (first != -1) { // Make continuous edge + const glm::vec3 newTangent = glm::normalize(glm::cross( + GetNormal(first, normalIdx), GetNormal(second, normalIdx))); + if (!isfinite(newTangent[0])) continue; + + halfedgeTangent_[first] = CircularTangent( + newTangent, vertPos_[halfedge_[first].endVert] - vertPos_[vert]); + halfedgeTangent_[second] = CircularTangent( + -newTangent, vertPos_[halfedge_[second].endVert] - vertPos_[vert]); + + ForVert(first, [this, first, second](int current) { + if (current != first && current != second && + !IsMarkedInsideQuad(current)) { + SharpenTangent(current, 0); + } + }); + } else { // Sharpen vertex uniformly + ForVert(second, [this](int current) { + if (!IsMarkedInsideQuad(current)) { + SharpenTangent(current, 0); + } + }); + } + } + LinearizeFlatTangents(); +} + +/** + * Calculates halfedgeTangent_, allowing the manifold to be refined and + * smoothed. The tangents form weighted cubic Beziers along each edge. This + * function creates circular arcs where possible (minimizing maximum curvature), + * constrained to the vertex normals. Where sharpenedEdges are specified, the + * tangents are shortened that intersect the sharpened edge, concentrating the + * curvature there, while the tangents of the sharp edges themselves are aligned + * for continuity. + */ +void Manifold::Impl::CreateTangents(std::vector sharpenedEdges) { + ZoneScoped; + const int numHalfedge = halfedge_.size(); + halfedgeTangent_.resize(0); + Vec tangent(numHalfedge); + + Vec triIsFlatFace = FlatFaces(); + Vec vertFlatFace = VertFlatFace(triIsFlatFace); + Vec vertNormal = vertNormal_; + for (int v = 0; v < NumVert(); ++v) { + if (vertFlatFace[v] >= 0) { + vertNormal[v] = faceNormal_[vertFlatFace[v]]; + } + } + + for_each_n(autoPolicy(numHalfedge), + zip(tangent.begin(), halfedge_.cbegin(), countAt(0)), numHalfedge, + SmoothBezier({this, vertNormal})); + + halfedgeTangent_.swap(tangent); + + // Add sharpened edges around faces, just on the face side. 
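+  // (A flat face must keep its single normal, so every boundary halfedge
+  // leaving it is sharpened with smoothness 0 on the face's own side only;
+  // the paired halfedge later defaults to smoothness 1, leaving the
+  // neighboring surface free to curve away from the edge.)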
+ for (int tri = 0; tri < NumTri(); ++tri) { + if (!triIsFlatFace[tri]) continue; + for (const int j : {0, 1, 2}) { + const int tri2 = halfedge_[3 * tri + j].pairedHalfedge / 3; + if (!triIsFlatFace[tri2] || + !meshRelation_.triRef[tri].SameFace(meshRelation_.triRef[tri2])) { + sharpenedEdges.push_back({3 * tri + j, 0}); + } + } + } + + if (sharpenedEdges.empty()) return; + + using Pair = std::pair; + // Fill in missing pairs with default smoothness = 1. + std::map edges; + for (Smoothness edge : sharpenedEdges) { + if (edge.smoothness >= 1) continue; + const bool forward = halfedge_[edge.halfedge].IsForward(); + const int pair = halfedge_[edge.halfedge].pairedHalfedge; + const int idx = forward ? edge.halfedge : pair; + if (edges.find(idx) == edges.end()) { + edges[idx] = {edge, {pair, 1}}; + if (!forward) std::swap(edges[idx].first, edges[idx].second); + } else { + Smoothness& e = forward ? edges[idx].first : edges[idx].second; + e.smoothness = glm::min(edge.smoothness, e.smoothness); + } + } + + std::map> vertTangents; + for (const auto& value : edges) { + const Pair edge = value.second; + vertTangents[halfedge_[edge.first.halfedge].startVert].push_back(edge); + vertTangents[halfedge_[edge.second.halfedge].startVert].push_back( + {edge.second, edge.first}); + } + + for (const auto& value : vertTangents) { + const std::vector& vert = value.second; + // Sharp edges that end are smooth at their terminal vert. + if (vert.size() == 1) continue; + if (vert.size() == 2) { // Make continuous edge + const int first = vert[0].first.halfedge; + const int second = vert[1].first.halfedge; + const glm::vec3 newTangent = + glm::normalize(glm::vec3(halfedgeTangent_[first]) - + glm::vec3(halfedgeTangent_[second])); + + const glm::vec3 pos = vertPos_[halfedge_[first].startVert]; + halfedgeTangent_[first] = + CircularTangent(newTangent, vertPos_[halfedge_[first].endVert] - pos); + halfedgeTangent_[second] = CircularTangent( + -newTangent, vertPos_[halfedge_[second].endVert] - pos); + + float smoothness = + (vert[0].second.smoothness + vert[1].first.smoothness) / 2; + ForVert(first, [this, &smoothness, &vert, first, second](int current) { + if (current == second) { + smoothness = + (vert[1].second.smoothness + vert[0].first.smoothness) / 2; + } else if (current != first && !IsMarkedInsideQuad(current)) { + SharpenTangent(current, smoothness); + } + }); + } else { // Sharpen vertex uniformly + float smoothness = 0; + for (const Pair& pair : vert) { + smoothness += pair.first.smoothness; + smoothness += pair.second.smoothness; + } + smoothness /= 2 * vert.size(); + + ForVert(vert[0].first.halfedge, [this, smoothness](int current) { + if (!IsMarkedInsideQuad(current)) { + SharpenTangent(current, smoothness); + } + }); + } + } + LinearizeFlatTangents(); +} + +void Manifold::Impl::Refine(std::function edgeDivisions) { + if (IsEmpty()) return; + Manifold::Impl old = *this; + Vec vertBary = Subdivide(edgeDivisions); + if (vertBary.size() == 0) return; + + if (old.halfedgeTangent_.size() == old.halfedge_.size()) { + for_each_n(autoPolicy(NumTri()), zip(vertPos_.begin(), vertBary.begin()), + NumVert(), InterpTri({&old})); + // Make original since the subdivided faces have been warped into + // being non-coplanar, and hence not being related to the original faces. 
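+    // (Without a fresh originalID, downstream passes could still treat the
+    // warped triangles as coplanar pieces of the old faces and re-merge them.)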
+ meshRelation_.originalID = ReserveIDs(1); + InitializeOriginal(); + CreateFaces(); + } + + halfedgeTangent_.resize(0); + Finish(); +} + +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/sort.cpp b/thirdparty/manifold/src/manifold/src/sort.cpp new file mode 100644 index 000000000000..a5dba9b3dcd0 --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/sort.cpp @@ -0,0 +1,616 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "impl.h" +#include "par.h" + +namespace { +using namespace manifold; + +constexpr uint32_t kNoCode = 0xFFFFFFFFu; + +struct Extrema : public thrust::binary_function { + void MakeForward(Halfedge& a) { + if (!a.IsForward()) { + int tmp = a.startVert; + a.startVert = a.endVert; + a.endVert = tmp; + } + } + + int MaxOrMinus(int a, int b) { + return glm::min(a, b) < 0 ? -1 : glm::max(a, b); + } + + Halfedge operator()(Halfedge a, Halfedge b) { + MakeForward(a); + MakeForward(b); + a.startVert = glm::min(a.startVert, b.startVert); + a.endVert = glm::max(a.endVert, b.endVert); + a.face = MaxOrMinus(a.face, b.face); + a.pairedHalfedge = MaxOrMinus(a.pairedHalfedge, b.pairedHalfedge); + return a; + } +}; + +uint32_t SpreadBits3(uint32_t v) { + v = 0xFF0000FFu & (v * 0x00010001u); + v = 0x0F00F00Fu & (v * 0x00000101u); + v = 0xC30C30C3u & (v * 0x00000011u); + v = 0x49249249u & (v * 0x00000005u); + return v; +} + +uint32_t MortonCode(glm::vec3 position, Box bBox) { + // Unreferenced vertices are marked NaN, and this will sort them to the end + // (the Morton code only uses the first 30 of 32 bits). + if (isnan(position.x)) return kNoCode; + + glm::vec3 xyz = (position - bBox.min) / (bBox.max - bBox.min); + xyz = glm::min(glm::vec3(1023.0f), glm::max(glm::vec3(0.0f), 1024.0f * xyz)); + uint32_t x = SpreadBits3(static_cast(xyz.x)); + uint32_t y = SpreadBits3(static_cast(xyz.y)); + uint32_t z = SpreadBits3(static_cast(xyz.z)); + return x * 4 + y * 2 + z; +} + +struct Morton { + const Box bBox; + + void operator()(thrust::tuple inout) { + glm::vec3 position = thrust::get<1>(inout); + thrust::get<0>(inout) = MortonCode(position, bBox); + } +}; + +struct FaceMortonBox { + VecView halfedge; + VecView vertPos; + const Box bBox; + + void operator()(thrust::tuple inout) { + uint32_t& mortonCode = thrust::get<0>(inout); + Box& faceBox = thrust::get<1>(inout); + int face = thrust::get<2>(inout); + + // Removed tris are marked by all halfedges having pairedHalfedge = -1, and + // this will sort them to the end (the Morton code only uses the first 30 of + // 32 bits). 
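+    // (SpreadBits3 moves bit k of a 10-bit value to bit 3k, so MortonCode
+    // interleaves x, y, z as ...zyx zyx zyx: e.g. x = 2 spreads to 0b1000,
+    // and x * 4 places it at bit 5 = 3 * 1 + 2. Nearby points in space thus
+    // get nearby codes, which makes the sort below a spatial sort.)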
+ if (halfedge[3 * face].pairedHalfedge < 0) { + mortonCode = kNoCode; + return; + } + + glm::vec3 center(0.0f); + + for (const int i : {0, 1, 2}) { + const glm::vec3 pos = vertPos[halfedge[3 * face + i].startVert]; + center += pos; + faceBox.Union(pos); + } + center /= 3; + + mortonCode = MortonCode(center, bBox); + } +}; + +struct Reindex { + VecView indexInv; + + void operator()(Halfedge& edge) { + if (edge.startVert < 0) return; + edge.startVert = indexInv[edge.startVert]; + edge.endVert = indexInv[edge.endVert]; + } +}; + +struct MarkProp { + VecView keep; + + void operator()(glm::ivec3 triProp) { + for (const int i : {0, 1, 2}) { + reinterpret_cast*>(&keep[triProp[i]]) + ->store(1, std::memory_order_relaxed); + } + } +}; + +struct GatherProps { + VecView properties; + VecView oldProperties; + const int numProp; + + void operator()(thrust::tuple in) { + const int oldIdx = thrust::get<0>(in); + const int newIdx = thrust::get<1>(in); + const int keep = thrust::get<2>(in); + if (keep == 0) return; + for (int p = 0; p < numProp; ++p) { + properties[newIdx * numProp + p] = oldProperties[oldIdx * numProp + p]; + } + } +}; + +struct ReindexProps { + VecView old2new; + + void operator()(glm::ivec3& triProp) { + for (const int i : {0, 1, 2}) { + triProp[i] = old2new[triProp[i]]; + } + } +}; + +template +void Permute(Vec& inOut, const Vec& new2Old) { + Vec tmp(std::move(inOut)); + inOut.resize(new2Old.size()); + gather(autoPolicy(new2Old.size()), new2Old.begin(), new2Old.end(), + tmp.begin(), inOut.begin()); +} + +template void Permute(Vec&, const Vec&); +template void Permute(Vec&, const Vec&); + +struct ReindexFace { + VecView halfedge; + VecView halfedgeTangent; + VecView oldHalfedge; + VecView oldHalfedgeTangent; + VecView faceNew2Old; + VecView faceOld2New; + + void operator()(int newFace) { + const int oldFace = faceNew2Old[newFace]; + for (const int i : {0, 1, 2}) { + const int oldEdge = 3 * oldFace + i; + Halfedge edge = oldHalfedge[oldEdge]; + edge.face = newFace; + const int pairedFace = edge.pairedHalfedge / 3; + const int offset = edge.pairedHalfedge - 3 * pairedFace; + edge.pairedHalfedge = 3 * faceOld2New[pairedFace] + offset; + const int newEdge = 3 * newFace + i; + halfedge[newEdge] = edge; + if (!oldHalfedgeTangent.empty()) { + halfedgeTangent[newEdge] = oldHalfedgeTangent[oldEdge]; + } + } + } +}; + +struct VertMortonBox { + VecView vertProperties; + const uint32_t numProp; + const float tol; + const Box bBox; + + void operator()(thrust::tuple inout) { + uint32_t& mortonCode = thrust::get<0>(inout); + Box& vertBox = thrust::get<1>(inout); + int vert = thrust::get<2>(inout); + + const glm::vec3 center(vertProperties[numProp * vert], + vertProperties[numProp * vert + 1], + vertProperties[numProp * vert + 2]); + + vertBox.min = center - tol / 2; + vertBox.max = center + tol / 2; + + mortonCode = MortonCode(center, bBox); + } +}; + +struct Duplicate { + thrust::pair operator()(float x) { + return thrust::make_pair(x, x); + } +}; + +struct MinMax : public thrust::binary_function, + thrust::pair, + thrust::pair> { + thrust::pair operator()(thrust::pair a, + thrust::pair b) { + return thrust::make_pair(glm::min(a.first, b.first), + glm::max(a.second, b.second)); + } +}; +} // namespace + +namespace manifold { + +/** + * Once halfedge_ has been filled in, this function can be called to create the + * rest of the internal data structures. This function also removes the verts + * and halfedges flagged for removal (NaN verts and -1 halfedges). 
+ */ +void Manifold::Impl::Finish() { + if (halfedge_.size() == 0) return; + + CalculateBBox(); + SetPrecision(precision_); + if (!bBox_.IsFinite()) { + // Decimated out of existence - early out. + MarkFailure(Error::NoError); + return; + } + + SortVerts(); + Vec faceBox; + Vec faceMorton; + GetFaceBoxMorton(faceBox, faceMorton); + SortFaces(faceBox, faceMorton); + if (halfedge_.size() == 0) return; + CompactProps(); + + ASSERT(halfedge_.size() % 6 == 0, topologyErr, + "Not an even number of faces after sorting faces!"); + +#ifdef MANIFOLD_DEBUG + Halfedge extrema = {0, 0, 0, 0}; + extrema = reduce(autoPolicy(halfedge_.size()), halfedge_.begin(), + halfedge_.end(), extrema, Extrema()); +#endif + + ASSERT(extrema.startVert >= 0, topologyErr, "Vertex index is negative!"); + ASSERT(extrema.endVert < NumVert(), topologyErr, + "Vertex index exceeds number of verts!"); + ASSERT(extrema.face >= 0, topologyErr, "Face index is negative!"); + ASSERT(extrema.face < NumTri(), topologyErr, + "Face index exceeds number of faces!"); + ASSERT(extrema.pairedHalfedge >= 0, topologyErr, + "Halfedge index is negative!"); + ASSERT(extrema.pairedHalfedge < 2 * NumEdge(), topologyErr, + "Halfedge index exceeds number of halfedges!"); + ASSERT(meshRelation_.triRef.size() == NumTri() || + meshRelation_.triRef.size() == 0, + logicErr, "Mesh Relation doesn't fit!"); + ASSERT(faceNormal_.size() == NumTri() || faceNormal_.size() == 0, logicErr, + "faceNormal size = " + std::to_string(faceNormal_.size()) + + ", NumTri = " + std::to_string(NumTri())); + // TODO: figure out why this has a flaky failure and then enable reading + // vertNormals from a Mesh. + // ASSERT(vertNormal_.size() == NumVert() || vertNormal_.size() == 0, + // logicErr, + // "vertNormal size = " + std::to_string(vertNormal_.size()) + + // ", NumVert = " + std::to_string(NumVert())); + + CalculateNormals(); + collider_ = Collider(faceBox, faceMorton); + + ASSERT(Is2Manifold(), logicErr, "mesh is not 2-manifold!"); +} + +/** + * Sorts the vertices according to their Morton code. + */ +void Manifold::Impl::SortVerts() { + ZoneScoped; + const int numVert = NumVert(); + Vec vertMorton(numVert); + auto policy = autoPolicy(numVert); + for_each_n(policy, zip(vertMorton.begin(), vertPos_.cbegin()), numVert, + Morton({bBox_})); + + Vec vertNew2Old(numVert); + sequence(policy, vertNew2Old.begin(), vertNew2Old.end()); + + stable_sort(policy, zip(vertMorton.begin(), vertNew2Old.begin()), + zip(vertMorton.end(), vertNew2Old.end()), + [](const thrust::tuple& a, + const thrust::tuple& b) { + return thrust::get<0>(a) < thrust::get<0>(b); + }); + + ReindexVerts(vertNew2Old, numVert); + + // Verts were flagged for removal with NaNs and assigned kNoCode to sort + // them to the end, which allows them to be removed. + const int newNumVert = + std::lower_bound(vertMorton.begin(), vertMorton.end(), kNoCode) - + vertMorton.begin(); + + vertNew2Old.resize(newNumVert); + Permute(vertPos_, vertNew2Old); + + if (vertNormal_.size() == numVert) { + Permute(vertNormal_, vertNew2Old); + } +} + +/** + * Updates the halfedges to point to new vert indices based on a mapping, + * vertNew2Old. This may be a subset, so the total number of original verts is + * also given. 
+ */ +void Manifold::Impl::ReindexVerts(const Vec& vertNew2Old, int oldNumVert) { + ZoneScoped; + Vec vertOld2New(oldNumVert); + scatter(autoPolicy(oldNumVert), countAt(0), countAt(NumVert()), + vertNew2Old.begin(), vertOld2New.begin()); + for_each(autoPolicy(oldNumVert), halfedge_.begin(), halfedge_.end(), + Reindex({vertOld2New})); +} + +/** + * Removes unreferenced property verts and reindexes triProperties. + */ +void Manifold::Impl::CompactProps() { + ZoneScoped; + if (meshRelation_.numProp == 0) return; + + const int numVerts = meshRelation_.properties.size() / meshRelation_.numProp; + Vec keep(numVerts, 0); + auto policy = autoPolicy(numVerts); + + for_each(policy, meshRelation_.triProperties.cbegin(), + meshRelation_.triProperties.cend(), MarkProp({keep})); + Vec propOld2New(numVerts + 1, 0); + inclusive_scan(policy, keep.begin(), keep.end(), propOld2New.begin() + 1); + + Vec oldProp = meshRelation_.properties; + const int numVertsNew = propOld2New[numVerts]; + meshRelation_.properties.resize(meshRelation_.numProp * numVertsNew); + for_each_n( + policy, zip(countAt(0), propOld2New.cbegin(), keep.cbegin()), numVerts, + GatherProps({meshRelation_.properties, oldProp, meshRelation_.numProp})); + for_each_n(policy, meshRelation_.triProperties.begin(), NumTri(), + ReindexProps({propOld2New})); +} + +/** + * Fills the faceBox and faceMorton input with the bounding boxes and Morton + * codes of the faces, respectively. The Morton code is based on the center of + * the bounding box. + */ +void Manifold::Impl::GetFaceBoxMorton(Vec& faceBox, + Vec& faceMorton) const { + ZoneScoped; + faceBox.resize(NumTri()); + faceMorton.resize(NumTri()); + for_each_n(autoPolicy(NumTri()), + zip(faceMorton.begin(), faceBox.begin(), countAt(0)), NumTri(), + FaceMortonBox({halfedge_, vertPos_, bBox_})); +} + +/** + * Sorts the faces of this manifold according to their input Morton code. The + * bounding box and Morton code arrays are also sorted accordingly. + */ +void Manifold::Impl::SortFaces(Vec& faceBox, Vec& faceMorton) { + ZoneScoped; + Vec faceNew2Old(NumTri()); + auto policy = autoPolicy(faceNew2Old.size()); + sequence(policy, faceNew2Old.begin(), faceNew2Old.end()); + + stable_sort(policy, zip(faceMorton.begin(), faceNew2Old.begin()), + zip(faceMorton.end(), faceNew2Old.end()), + [](const thrust::tuple& a, + const thrust::tuple& b) { + return thrust::get<0>(a) < thrust::get<0>(b); + }); + + // Tris were flagged for removal with pairedHalfedge = -1 and assigned kNoCode + // to sort them to the end, which allows them to be removed. + const int newNumTri = + find(policy, faceMorton.begin(), + faceMorton.end(), kNoCode) - + faceMorton.begin(); + faceMorton.resize(newNumTri); + faceNew2Old.resize(newNumTri); + + Permute(faceBox, faceNew2Old); + GatherFaces(faceNew2Old); +} + +/** + * Creates the halfedge_ vector for this manifold by copying a set of faces from + * another manifold, given by oldHalfedge. Input faceNew2Old defines the old + * faces to gather into this. 
+ */ +void Manifold::Impl::GatherFaces(const Vec& faceNew2Old) { + ZoneScoped; + const int numTri = faceNew2Old.size(); + if (meshRelation_.triRef.size() == NumTri()) + Permute(meshRelation_.triRef, faceNew2Old); + if (meshRelation_.triProperties.size() == NumTri()) + Permute(meshRelation_.triProperties, faceNew2Old); + if (faceNormal_.size() == NumTri()) Permute(faceNormal_, faceNew2Old); + + Vec oldHalfedge(std::move(halfedge_)); + Vec oldHalfedgeTangent(std::move(halfedgeTangent_)); + Vec faceOld2New(oldHalfedge.size() / 3); + auto policy = autoPolicy(numTri); + scatter(policy, countAt(0), countAt(numTri), faceNew2Old.begin(), + faceOld2New.begin()); + + halfedge_.resize(3 * numTri); + if (oldHalfedgeTangent.size() != 0) halfedgeTangent_.resize(3 * numTri); + for_each_n(policy, countAt(0), numTri, + ReindexFace({halfedge_, halfedgeTangent_, oldHalfedge, + oldHalfedgeTangent, faceNew2Old, faceOld2New})); +} + +void Manifold::Impl::GatherFaces(const Impl& old, const Vec& faceNew2Old) { + ZoneScoped; + const int numTri = faceNew2Old.size(); + auto policy = autoPolicy(numTri); + + meshRelation_.triRef.resize(numTri); + gather(policy, faceNew2Old.begin(), faceNew2Old.end(), + old.meshRelation_.triRef.begin(), meshRelation_.triRef.begin()); + + for (const auto& pair : old.meshRelation_.meshIDtransform) { + meshRelation_.meshIDtransform[pair.first] = pair.second; + } + + if (old.meshRelation_.triProperties.size() > 0) { + meshRelation_.triProperties.resize(numTri); + gather(policy, faceNew2Old.begin(), faceNew2Old.end(), + old.meshRelation_.triProperties.begin(), + meshRelation_.triProperties.begin()); + meshRelation_.numProp = old.meshRelation_.numProp; + meshRelation_.properties = old.meshRelation_.properties; + } + + if (old.faceNormal_.size() == old.NumTri()) { + faceNormal_.resize(numTri); + gather(policy, faceNew2Old.begin(), faceNew2Old.end(), + old.faceNormal_.begin(), faceNormal_.begin()); + } + + Vec faceOld2New(old.NumTri()); + scatter(policy, countAt(0), countAt(numTri), faceNew2Old.begin(), + faceOld2New.begin()); + + halfedge_.resize(3 * numTri); + if (old.halfedgeTangent_.size() != 0) halfedgeTangent_.resize(3 * numTri); + for_each_n(policy, countAt(0), numTri, + ReindexFace({halfedge_, halfedgeTangent_, old.halfedge_, + old.halfedgeTangent_, faceNew2Old, faceOld2New})); +} + +/// Constructs a position-only MeshGL from the input Mesh. +MeshGL::MeshGL(const Mesh& mesh) { + numProp = 3; + precision = mesh.precision; + vertProperties.resize(numProp * mesh.vertPos.size()); + for (int i = 0; i < mesh.vertPos.size(); ++i) { + for (int j : {0, 1, 2}) vertProperties[3 * i + j] = mesh.vertPos[i][j]; + } + triVerts.resize(3 * mesh.triVerts.size()); + for (int i = 0; i < mesh.triVerts.size(); ++i) { + for (int j : {0, 1, 2}) triVerts[3 * i + j] = mesh.triVerts[i][j]; + } + halfedgeTangent.resize(4 * mesh.halfedgeTangent.size()); + for (int i = 0; i < mesh.halfedgeTangent.size(); ++i) { + for (int j : {0, 1, 2, 3}) + halfedgeTangent[4 * i + j] = mesh.halfedgeTangent[i][j]; + } +} + +/** + * Updates the mergeFromVert and mergeToVert vectors in order to create a + * manifold solid. If the MeshGL is already manifold, no change will occur and + * the function will return false. Otherwise, this will merge verts along open + * edges within precision (the maximum of the MeshGL precision and the baseline + * bounding-box precision), keeping any from the existing merge vectors. 
+ * + * There is no guarantee the result will be manifold - this is a best-effort + * helper function designed primarily to aid in the case where a manifold + * multi-material MeshGL was produced, but its merge vectors were lost due to a + * round-trip through a file format. Constructing a Manifold from the result + * will report a Status if it is not manifold. + */ +bool MeshGL::Merge() { + ZoneScoped; + std::multiset> openEdges; + + std::vector merge(NumVert()); + std::iota(merge.begin(), merge.end(), 0); + for (int i = 0; i < mergeFromVert.size(); ++i) { + merge[mergeFromVert[i]] = mergeToVert[i]; + } + + const int numVert = NumVert(); + const int numTri = NumTri(); + const int next[3] = {1, 2, 0}; + for (int tri = 0; tri < numTri; ++tri) { + for (int i : {0, 1, 2}) { + auto edge = std::make_pair(merge[triVerts[3 * tri + next[i]]], + merge[triVerts[3 * tri + i]]); + auto it = openEdges.find(edge); + if (it == openEdges.end()) { + std::swap(edge.first, edge.second); + openEdges.insert(edge); + } else { + openEdges.erase(it); + } + } + } + + if (openEdges.empty()) { + return false; + } + + const int numOpenVert = openEdges.size(); + Vec openVerts(numOpenVert); + int i = 0; + for (const auto& edge : openEdges) { + const int vert = edge.first; + openVerts[i++] = vert; + } + + Vec vertPropD(vertProperties); + Box bBox; + for (const int i : {0, 1, 2}) { + strided_range::Iter> iPos(vertPropD.begin() + i, vertPropD.end(), + numProp); + auto minMax = transform_reduce>( + autoPolicy(numVert), iPos.begin(), iPos.end(), Duplicate(), + thrust::make_pair(std::numeric_limits::infinity(), + -std::numeric_limits::infinity()), + MinMax()); + bBox.min[i] = minMax.first; + bBox.max[i] = minMax.second; + } + precision = MaxPrecision(precision, bBox); + if (precision < 0) return false; + + auto policy = autoPolicy(numOpenVert); + Vec vertBox(numOpenVert); + Vec vertMorton(numOpenVert); + + for_each_n(policy, + zip(vertMorton.begin(), vertBox.begin(), openVerts.cbegin()), + numOpenVert, VertMortonBox({vertPropD, numProp, precision, bBox})); + + stable_sort(policy, + zip(vertMorton.begin(), vertBox.begin(), openVerts.begin()), + zip(vertMorton.end(), vertBox.end(), openVerts.end()), + [](const thrust::tuple& a, + const thrust::tuple& b) { + return thrust::get<0>(a) < thrust::get<0>(b); + }); + + Collider collider(vertBox, vertMorton); + SparseIndices toMerge = collider.Collisions(vertBox.cview()); + + UnionFind<> uf(numVert); + for (int i = 0; i < mergeFromVert.size(); ++i) { + uf.unionXY(static_cast(mergeFromVert[i]), + static_cast(mergeToVert[i])); + } + for (int i = 0; i < toMerge.size(); ++i) { + uf.unionXY(openVerts[toMerge.Get(i, false)], + openVerts[toMerge.Get(i, true)]); + } + + mergeToVert.clear(); + mergeFromVert.clear(); + for (int v = 0; v < numVert; ++v) { + const int mergeTo = uf.find(v); + if (mergeTo != v) { + mergeFromVert.push_back(v); + mergeToVert.push_back(mergeTo); + } + } + + return true; +} +} // namespace manifold diff --git a/thirdparty/manifold/src/manifold/src/subdivision.cpp b/thirdparty/manifold/src/manifold/src/subdivision.cpp new file mode 100644 index 000000000000..3c5995bb516a --- /dev/null +++ b/thirdparty/manifold/src/manifold/src/subdivision.cpp @@ -0,0 +1,792 @@ +// Copyright 2024 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "impl.h" +#include "par.h" + +template <> +struct std::hash { + size_t operator()(const glm::ivec4& p) const { + return std::hash()(p.x) ^ std::hash()(p.y) ^ + std::hash()(p.z) ^ std::hash()(p.w); + } +}; + +namespace { +using namespace manifold; + +struct ReindexHalfedge { + VecView half2Edge; + const VecView halfedges; + + void operator()(thrust::tuple in) { + const int edge = thrust::get<0>(in); + const int halfedge = thrust::get<1>(in).halfedgeIdx; + + half2Edge[halfedge] = edge; + half2Edge[halfedges[halfedge].pairedHalfedge] = edge; + } +}; + +class Partition { + public: + // The cached partitions don't have idx - it's added to the copy returned + // from GetPartition that contains the mapping of the input divisions into the + // sorted divisions that are uniquely cached. + glm::ivec4 idx; + glm::ivec4 sortedDivisions; + Vec vertBary; + Vec triVert; + + int InteriorOffset() const { + return sortedDivisions[0] + sortedDivisions[1] + sortedDivisions[2] + + sortedDivisions[3]; + } + + int NumInterior() const { return vertBary.size() - InteriorOffset(); } + + static Partition GetPartition(glm::ivec4 divisions) { + if (divisions[0] == 0) return Partition(); // skip wrong side of quad + + glm::ivec4 sortedDiv = divisions; + glm::ivec4 triIdx = {0, 1, 2, 3}; + if (divisions[3] == 0) { // triangle + if (sortedDiv[2] > sortedDiv[1]) { + std::swap(sortedDiv[2], sortedDiv[1]); + std::swap(triIdx[2], triIdx[1]); + } + if (sortedDiv[1] > sortedDiv[0]) { + std::swap(sortedDiv[1], sortedDiv[0]); + std::swap(triIdx[1], triIdx[0]); + if (sortedDiv[2] > sortedDiv[1]) { + std::swap(sortedDiv[2], sortedDiv[1]); + std::swap(triIdx[2], triIdx[1]); + } + } + } else { // quad + int minIdx = 0; + int min = divisions[minIdx]; + int next = divisions[1]; + for (const int i : {1, 2, 3}) { + const int n = divisions[(i + 1) % 4]; + if (divisions[i] < min || (divisions[i] == min && n < next)) { + minIdx = i; + min = divisions[i]; + next = n; + } + } + // Backwards (mirrored) quads get a separate cache key for now for + // simplicity, so there is no reversal necessary for quads when + // re-indexing. + glm::ivec4 tmp = sortedDiv; + for (const int i : {0, 1, 2, 3}) { + triIdx[i] = (i + minIdx) % 4; + sortedDiv[i] = tmp[triIdx[i]]; + } + } + + Partition partition = GetCachedPartition(sortedDiv); + partition.idx = triIdx; + + return partition; + } + + Vec Reindex(glm::ivec4 tri, glm::ivec4 edgeOffsets, + glm::bvec4 edgeFwd, int interiorOffset) const { + Vec newVerts; + newVerts.reserve(vertBary.size()); + glm::ivec4 triIdx = idx; + glm::ivec4 outTri = {0, 1, 2, 3}; + if (tri[3] < 0 && idx[1] != Next3(idx[0])) { + triIdx = {idx[2], idx[0], idx[1], idx[3]}; + edgeFwd = glm::not_(edgeFwd); + std::swap(outTri[0], outTri[1]); + } + for (const int i : {0, 1, 2, 3}) { + if (tri[triIdx[i]] >= 0) newVerts.push_back(tri[triIdx[i]]); + } + for (const int i : {0, 1, 2, 3}) { + const int n = sortedDivisions[i] - 1; + int offset = edgeOffsets[idx[i]] + (edgeFwd[idx[i]] ? 0 : n - 1); + for (int j = 0; j < n; ++j) { + newVerts.push_back(offset); + offset += edgeFwd[idx[i]] ? 
1 : -1; + } + } + const int offset = interiorOffset - newVerts.size(); + for (int i = newVerts.size(); i < vertBary.size(); ++i) { + newVerts.push_back(i + offset); + } + + const int numTri = triVert.size(); + Vec newTriVert(numTri); + for_each_n( + autoPolicy(numTri), zip(newTriVert.begin(), triVert.begin()), numTri, + [&outTri, &newVerts](thrust::tuple inOut) { + for (const int j : {0, 1, 2}) { + thrust::get<0>(inOut)[outTri[j]] = + newVerts[thrust::get<1>(inOut)[j]]; + } + }); + return newTriVert; + } + + private: + static inline auto cacheLock = std::mutex(); + static inline auto cache = + std::unordered_map>(); + + // This triangulation is purely topological - it depends only on the number of + // divisions of the three sides of the triangle. This allows them to be cached + // and reused for similar triangles. The shape of the final surface is defined + // by the tangents and the barycentric coordinates of the new verts. For + // triangles, the input must be sorted: n[0] >= n[1] >= n[2] > 0. + static Partition GetCachedPartition(glm::ivec4 n) { + { + auto lockGuard = std::lock_guard(cacheLock); + auto cached = cache.find(n); + if (cached != cache.end()) { + return *cached->second; + } + } + Partition partition; + partition.sortedDivisions = n; + if (n[3] > 0) { // quad + partition.vertBary.push_back({1, 0, 0, 0}); + partition.vertBary.push_back({0, 1, 0, 0}); + partition.vertBary.push_back({0, 0, 1, 0}); + partition.vertBary.push_back({0, 0, 0, 1}); + glm::ivec4 edgeOffsets; + edgeOffsets[0] = 4; + for (const int i : {0, 1, 2, 3}) { + if (i > 0) { + edgeOffsets[i] = edgeOffsets[i - 1] + n[i - 1] - 1; + } + const glm::vec4 nextBary = partition.vertBary[(i + 1) % 4]; + for (int j = 1; j < n[i]; ++j) { + partition.vertBary.push_back( + glm::mix(partition.vertBary[i], nextBary, (float)j / n[i])); + } + } + PartitionQuad(partition.triVert, partition.vertBary, {0, 1, 2, 3}, + edgeOffsets, n - 1, {true, true, true, true}); + } else { // tri + partition.vertBary.push_back({1, 0, 0, 0}); + partition.vertBary.push_back({0, 1, 0, 0}); + partition.vertBary.push_back({0, 0, 1, 0}); + for (const int i : {0, 1, 2}) { + const glm::vec4 nextBary = partition.vertBary[(i + 1) % 3]; + for (int j = 1; j < n[i]; ++j) { + partition.vertBary.push_back( + glm::mix(partition.vertBary[i], nextBary, (float)j / n[i])); + } + } + const glm::ivec3 edgeOffsets = {3, 3 + n[0] - 1, 3 + n[0] - 1 + n[1] - 1}; + + const float f = n[2] * n[2] + n[0] * n[0]; + if (n[1] == 1) { + if (n[0] == 1) { + partition.triVert.push_back({0, 1, 2}); + } else { + PartitionFan(partition.triVert, {0, 1, 2}, n[0] - 1, edgeOffsets[0]); + } + } else if (n[1] * n[1] > + f - glm::sqrt(2.0f) * n[0] * n[2]) { // acute-ish + partition.triVert.push_back({edgeOffsets[1] - 1, 1, edgeOffsets[1]}); + PartitionQuad(partition.triVert, partition.vertBary, + {edgeOffsets[1] - 1, edgeOffsets[1], 2, 0}, + {-1, edgeOffsets[1] + 1, edgeOffsets[2], edgeOffsets[0]}, + {0, n[1] - 2, n[2] - 1, n[0] - 2}, + {true, true, true, true}); + } else { // obtuse -> spit into two acute + // portion of n[0] under n[2] + const int ns = + glm::min(n[0] - 2, (int)glm::round((f - n[1] * n[1]) / (2 * n[0]))); + // height from n[0]: nh <= n[2] + const int nh = + glm::max(1., glm::round(glm::sqrt(n[2] * n[2] - ns * ns))); + + const int hOffset = partition.vertBary.size(); + const glm::vec4 middleBary = + partition.vertBary[edgeOffsets[0] + ns - 1]; + for (int j = 1; j < nh; ++j) { + partition.vertBary.push_back( + glm::mix(partition.vertBary[2], middleBary, (float)j / nh)); + } + 
+ partition.triVert.push_back({edgeOffsets[1] - 1, 1, edgeOffsets[1]}); + PartitionQuad( + partition.triVert, partition.vertBary, + {edgeOffsets[1] - 1, edgeOffsets[1], 2, edgeOffsets[0] + ns - 1}, + {-1, edgeOffsets[1] + 1, hOffset, edgeOffsets[0] + ns}, + {0, n[1] - 2, nh - 1, n[0] - ns - 2}, {true, true, true, true}); + + if (n[2] == 1) { + PartitionFan(partition.triVert, {0, edgeOffsets[0] + ns - 1, 2}, + ns - 1, edgeOffsets[0]); + } else { + if (ns == 1) { + partition.triVert.push_back({hOffset, 2, edgeOffsets[2]}); + PartitionQuad(partition.triVert, partition.vertBary, + {hOffset, edgeOffsets[2], 0, edgeOffsets[0]}, + {-1, edgeOffsets[2] + 1, -1, hOffset + nh - 2}, + {0, n[2] - 2, ns - 1, nh - 2}, + {true, true, true, false}); + } else { + partition.triVert.push_back({hOffset - 1, 0, edgeOffsets[0]}); + PartitionQuad( + partition.triVert, partition.vertBary, + {hOffset - 1, edgeOffsets[0], edgeOffsets[0] + ns - 1, 2}, + {-1, edgeOffsets[0] + 1, hOffset + nh - 2, edgeOffsets[2]}, + {0, ns - 2, nh - 1, n[2] - 2}, {true, true, false, true}); + } + } + } + } + + auto lockGuard = std::lock_guard(cacheLock); + cache.insert({n, std::make_unique(partition)}); + return partition; + } + + // Side 0 has added edges while sides 1 and 2 do not. Fan spreads from vert 2. + static void PartitionFan(Vec& triVert, glm::ivec3 cornerVerts, + int added, int edgeOffset) { + int last = cornerVerts[0]; + for (int i = 0; i < added; ++i) { + const int next = edgeOffset + i; + triVert.push_back({last, next, cornerVerts[2]}); + last = next; + } + triVert.push_back({last, cornerVerts[1], cornerVerts[2]}); + } + + // Partitions are parallel to the first edge unless two consecutive edgeAdded + // are zero, in which case a terminal triangulation is performed. + static void PartitionQuad(Vec& triVert, Vec& vertBary, + glm::ivec4 cornerVerts, glm::ivec4 edgeOffsets, + glm::ivec4 edgeAdded, glm::bvec4 edgeFwd) { + auto GetEdgeVert = [&](int edge, int idx) { + return edgeOffsets[edge] + (edgeFwd[edge] ? 1 : -1) * idx; + }; + + ASSERT(glm::all(glm::greaterThanEqual(edgeAdded, glm::ivec4(0))), logicErr, + "negative divisions!"); + + int corner = -1; + int last = 3; + int maxEdge = -1; + for (const int i : {0, 1, 2, 3}) { + if (corner == -1 && edgeAdded[i] == 0 && edgeAdded[last] == 0) { + corner = i; + } + if (edgeAdded[i] > 0) { + maxEdge = maxEdge == -1 ? 
i : -2; + } + last = i; + } + if (corner >= 0) { // terminate + if (maxEdge >= 0) { + glm::ivec4 edge = (glm::ivec4(0, 1, 2, 3) + maxEdge) % 4; + const int middle = edgeAdded[maxEdge] / 2; + triVert.push_back({cornerVerts[edge[2]], cornerVerts[edge[3]], + GetEdgeVert(maxEdge, middle)}); + int last = cornerVerts[edge[0]]; + for (int i = 0; i <= middle; ++i) { + const int next = GetEdgeVert(maxEdge, i); + triVert.push_back({cornerVerts[edge[3]], last, next}); + last = next; + } + last = cornerVerts[edge[1]]; + for (int i = edgeAdded[maxEdge] - 1; i >= middle; --i) { + const int next = GetEdgeVert(maxEdge, i); + triVert.push_back({cornerVerts[edge[2]], next, last}); + last = next; + } + } else { + int sideVert = cornerVerts[0]; // initial value is unused + for (const int j : {1, 2}) { + const int side = (corner + j) % 4; + if (j == 2 && edgeAdded[side] > 0) { + triVert.push_back( + {cornerVerts[side], GetEdgeVert(side, 0), sideVert}); + } else { + sideVert = cornerVerts[side]; + } + for (int i = 0; i < edgeAdded[side]; ++i) { + const int nextVert = GetEdgeVert(side, i); + triVert.push_back({cornerVerts[corner], sideVert, nextVert}); + sideVert = nextVert; + } + if (j == 2 || edgeAdded[side] == 0) { + triVert.push_back({cornerVerts[corner], sideVert, + cornerVerts[(corner + j + 1) % 4]}); + } + } + } + return; + } + // recursively partition + const int partitions = 1 + glm::min(edgeAdded[1], edgeAdded[3]); + glm::ivec4 newCornerVerts = {cornerVerts[1], -1, -1, cornerVerts[0]}; + glm::ivec4 newEdgeOffsets = { + edgeOffsets[1], -1, GetEdgeVert(3, edgeAdded[3] + 1), edgeOffsets[0]}; + glm::ivec4 newEdgeAdded = {0, -1, 0, edgeAdded[0]}; + glm::bvec4 newEdgeFwd = {edgeFwd[1], true, edgeFwd[3], edgeFwd[0]}; + + for (int i = 1; i < partitions; ++i) { + const int cornerOffset1 = (edgeAdded[1] * i) / partitions; + const int cornerOffset3 = + edgeAdded[3] - 1 - (edgeAdded[3] * i) / partitions; + const int nextOffset1 = GetEdgeVert(1, cornerOffset1 + 1); + const int nextOffset3 = GetEdgeVert(3, cornerOffset3 + 1); + const int added = glm::round(glm::mix( + (float)edgeAdded[0], (float)edgeAdded[2], (float)i / partitions)); + + newCornerVerts[1] = GetEdgeVert(1, cornerOffset1); + newCornerVerts[2] = GetEdgeVert(3, cornerOffset3); + newEdgeAdded[0] = std::abs(nextOffset1 - newEdgeOffsets[0]) - 1; + newEdgeAdded[1] = added; + newEdgeAdded[2] = std::abs(nextOffset3 - newEdgeOffsets[2]) - 1; + newEdgeOffsets[1] = vertBary.size(); + newEdgeOffsets[2] = nextOffset3; + + for (int j = 0; j < added; ++j) { + vertBary.push_back(glm::mix(vertBary[newCornerVerts[1]], + vertBary[newCornerVerts[2]], + (j + 1.0f) / (added + 1.0f))); + } + + PartitionQuad(triVert, vertBary, newCornerVerts, newEdgeOffsets, + newEdgeAdded, newEdgeFwd); + + newCornerVerts[0] = newCornerVerts[1]; + newCornerVerts[3] = newCornerVerts[2]; + newEdgeAdded[3] = newEdgeAdded[1]; + newEdgeOffsets[0] = nextOffset1; + newEdgeOffsets[3] = newEdgeOffsets[1] + newEdgeAdded[1] - 1; + newEdgeFwd[3] = false; + } + + newCornerVerts[1] = cornerVerts[2]; + newCornerVerts[2] = cornerVerts[3]; + newEdgeOffsets[1] = edgeOffsets[2]; + newEdgeAdded[0] = + edgeAdded[1] - std::abs(newEdgeOffsets[0] - edgeOffsets[1]); + newEdgeAdded[1] = edgeAdded[2]; + newEdgeAdded[2] = std::abs(newEdgeOffsets[2] - edgeOffsets[3]) - 1; + newEdgeOffsets[2] = edgeOffsets[3]; + newEdgeFwd[1] = edgeFwd[2]; + + PartitionQuad(triVert, vertBary, newCornerVerts, newEdgeOffsets, + newEdgeAdded, newEdgeFwd); + } +}; +} // namespace + +namespace manifold { + +/** + * Returns the tri side index 
(0-2) connected to the other side of this quad if + * this tri is part of a quad, or -1 otherwise. + */ +int Manifold::Impl::GetNeighbor(int tri) const { + int neighbor = -1; + for (const int i : {0, 1, 2}) { + if (IsMarkedInsideQuad(3 * tri + i)) { + neighbor = neighbor == -1 ? i : -2; + } + } + return neighbor; +} + +/** + * For the given triangle index, returns either the three halfedge indices of + * that triangle and halfedges[3] = -1, or if the triangle is part of a quad, it + * returns those four indices. If the triangle is part of a quad and is not the + * lower of the two triangle indices, it returns all -1s. + */ +glm::ivec4 Manifold::Impl::GetHalfedges(int tri) const { + glm::ivec4 halfedges(-1); + for (const int i : {0, 1, 2}) { + halfedges[i] = 3 * tri + i; + } + const int neighbor = GetNeighbor(tri); + if (neighbor >= 0) { // quad + const int pair = halfedge_[3 * tri + neighbor].pairedHalfedge; + if (pair / 3 < tri) { + return glm::ivec4(-1); // only process lower tri index + } + glm::ivec2 otherHalf; + otherHalf[0] = NextHalfedge(pair); + otherHalf[1] = NextHalfedge(otherHalf[0]); + halfedges[neighbor] = otherHalf[0]; + if (neighbor == 2) { + halfedges[3] = otherHalf[1]; + } else if (neighbor == 1) { + halfedges[3] = halfedges[2]; + halfedges[2] = otherHalf[1]; + } else { + halfedges[3] = halfedges[2]; + halfedges[2] = halfedges[1]; + halfedges[1] = otherHalf[1]; + } + } + return halfedges; +} + +/** + * Returns the BaryIndices, which gives the tri and indices (0-3), such that + * GetHalfedges(val.tri)[val.start4] points back to this halfedge, and val.end4 + * will point to the next one. This function handles this for both triangles and + * quads. Returns {-1, -1, -1} if the edge is the interior of a quad. + */ +Manifold::Impl::BaryIndices Manifold::Impl::GetIndices(int halfedge) const { + int tri = halfedge / 3; + int idx = halfedge % 3; + const int neighbor = GetNeighbor(tri); + if (idx == neighbor) { + return {-1, -1, -1}; + } + + if (neighbor < 0) { // tri + return {tri, idx, Next3(idx)}; + } else { // quad + const int pair = halfedge_[3 * tri + neighbor].pairedHalfedge; + if (pair / 3 < tri) { + tri = pair / 3; + const int j = pair % 3; + idx = Next3(neighbor) == idx ? j : (j + 1) % 4; + } else if (idx > neighbor) { + ++idx; + } + return {tri, idx, (idx + 1) % 4}; + } +} + +/** + * Retained verts are part of several triangles, and it doesn't matter which one + * the vertBary refers to. Here, whichever is last will win and it's done on the + * CPU for simplicity for now. Using AtomicCAS on .tri should work for a GPU + * version if desired. + */ +void Manifold::Impl::FillRetainedVerts(Vec& vertBary) const { + const int numTri = halfedge_.size() / 3; + for (int tri = 0; tri < numTri; ++tri) { + for (const int i : {0, 1, 2}) { + const BaryIndices indices = GetIndices(3 * tri + i); + if (indices.start4 < 0) continue; // skip quad interiors + glm::vec4 uvw(0); + uvw[indices.start4] = 1; + vertBary[halfedge_[3 * tri + i].startVert] = {indices.tri, uvw}; + } + } +} + +/** + * Split each edge into n pieces as defined by calling the edgeDivisions + * function, and sub-triangulate each triangle accordingly. This function + * doesn't run Finish(), as that is expensive and it'll need to be run after + * the new vertices have moved, which is a likely scenario after refinement + * (smoothing). 
+ */ +Vec Manifold::Impl::Subdivide( + std::function edgeDivisions) { + Vec edges = CreateTmpEdges(halfedge_); + const int numVert = NumVert(); + const int numEdge = edges.size(); + const int numTri = NumTri(); + Vec half2Edge(2 * numEdge); + auto policy = autoPolicy(numEdge); + for_each_n(policy, zip(countAt(0), edges.begin()), numEdge, + ReindexHalfedge({half2Edge, halfedge_})); + + Vec faceHalfedges(numTri); + for_each_n(policy, zip(faceHalfedges.begin(), countAt(0)), numTri, + [this](thrust::tuple inOut) { + glm::ivec4& halfedges = thrust::get<0>(inOut); + const int tri = thrust::get<1>(inOut); + halfedges = GetHalfedges(tri); + }); + + Vec edgeAdded(numEdge); + for_each_n(policy, zip(edgeAdded.begin(), edges.cbegin()), numEdge, + [edgeDivisions, this](thrust::tuple inOut) { + int& divisions = thrust::get<0>(inOut); + const TmpEdge edge = thrust::get<1>(inOut); + if (IsMarkedInsideQuad(edge.halfedgeIdx)) { + divisions = 0; + return; + } + const glm::vec3 vec = + vertPos_[edge.first] - vertPos_[edge.second]; + divisions = edgeDivisions(vec); + }); + + Vec edgeOffset(numEdge); + exclusive_scan(policy, edgeAdded.begin(), edgeAdded.end(), edgeOffset.begin(), + numVert); + + Vec vertBary(edgeOffset.back() + edgeAdded.back()); + const int totalEdgeAdded = vertBary.size() - numVert; + FillRetainedVerts(vertBary); + for_each_n(policy, zip(edges.begin(), edgeAdded.begin(), edgeOffset.begin()), + numEdge, [this, &vertBary](thrust::tuple in) { + const TmpEdge edge = thrust::get<0>(in); + const int n = thrust::get<1>(in); + const int offset = thrust::get<2>(in); + + const BaryIndices indices = GetIndices(edge.halfedgeIdx); + if (indices.tri < 0) { + return; // inside quad + } + const float frac = 1.0f / (n + 1); + + for (int i = 0; i < n; ++i) { + glm::vec4 uvw(0); + uvw[indices.end4] = (i + 1) * frac; + uvw[indices.start4] = 1 - uvw[indices.end4]; + vertBary[offset + i].uvw = uvw; + vertBary[offset + i].tri = indices.tri; + } + }); + + std::vector subTris(numTri); + for_each_n(policy, countAt(0), numTri, + [this, &subTris, &half2Edge, &edgeAdded, &faceHalfedges](int tri) { + const glm::ivec4 halfedges = faceHalfedges[tri]; + glm::ivec4 divisions(0); + for (const int i : {0, 1, 2, 3}) { + if (halfedges[i] >= 0) { + divisions[i] = edgeAdded[half2Edge[halfedges[i]]] + 1; + } + } + subTris[tri] = Partition::GetPartition(divisions); + }); + + Vec triOffset(numTri); + auto numSubTris = thrust::make_transform_iterator( + subTris.begin(), + [](const Partition& part) { return part.triVert.size(); }); + exclusive_scan(policy, numSubTris, numSubTris + numTri, triOffset.begin(), 0); + + Vec interiorOffset(numTri); + auto numInterior = thrust::make_transform_iterator( + subTris.begin(), + [](const Partition& part) { return part.NumInterior(); }); + exclusive_scan(policy, numInterior, numInterior + numTri, + interiorOffset.begin(), vertBary.size()); + + Vec triVerts(triOffset.back() + subTris.back().triVert.size()); + vertBary.resize(interiorOffset.back() + subTris.back().NumInterior()); + Vec triRef(triVerts.size()); + for_each_n( + policy, countAt(0), numTri, + [this, &triVerts, &triRef, &vertBary, &subTris, &edgeOffset, &half2Edge, + &triOffset, &interiorOffset, &faceHalfedges](int tri) { + const glm::ivec4 halfedges = faceHalfedges[tri]; + if (halfedges[0] < 0) return; + glm::ivec4 tri3; + glm::ivec4 edgeOffsets; + glm::bvec4 edgeFwd; + for (const int i : {0, 1, 2, 3}) { + if (halfedges[i] < 0) { + tri3[i] = -1; + continue; + } + const Halfedge& halfedge = halfedge_[halfedges[i]]; + tri3[i] = 
halfedge.startVert; + edgeOffsets[i] = edgeOffset[half2Edge[halfedges[i]]]; + edgeFwd[i] = halfedge.IsForward(); + } + + Vec newTris = subTris[tri].Reindex( + tri3, edgeOffsets, edgeFwd, interiorOffset[tri]); + copy(ExecutionPolicy::Seq, newTris.begin(), newTris.end(), + triVerts.begin() + triOffset[tri]); + auto start = triRef.begin() + triOffset[tri]; + fill(ExecutionPolicy::Seq, start, start + newTris.size(), + meshRelation_.triRef[tri]); + + const glm::ivec4 idx = subTris[tri].idx; + const glm::ivec4 vIdx = + halfedges[3] >= 0 || idx[1] == Next3(idx[0]) + ? idx + : glm::ivec4(idx[2], idx[0], idx[1], idx[3]); + glm::ivec4 rIdx; + for (const int i : {0, 1, 2, 3}) { + rIdx[vIdx[i]] = i; + } + + const auto& subBary = subTris[tri].vertBary; + transform(ExecutionPolicy::Seq, + subBary.begin() + subTris[tri].InteriorOffset(), + subBary.end(), vertBary.begin() + interiorOffset[tri], + [tri, rIdx](glm::vec4 bary) { + return Barycentric({tri, + {bary[rIdx[0]], bary[rIdx[1]], + bary[rIdx[2]], bary[rIdx[3]]}}); + }); + }); + meshRelation_.triRef = triRef; + + Vec newVertPos(vertBary.size()); + for_each_n( + policy, zip(newVertPos.begin(), vertBary.begin()), vertBary.size(), + [this, &faceHalfedges](thrust::tuple inOut) { + glm::vec3& pos = thrust::get<0>(inOut); + const Barycentric bary = thrust::get<1>(inOut); + + const glm::ivec4 halfedges = faceHalfedges[bary.tri]; + if (halfedges[3] < 0) { + glm::mat3 triPos; + for (const int i : {0, 1, 2}) { + triPos[i] = vertPos_[halfedge_[halfedges[i]].startVert]; + } + pos = triPos * glm::vec3(bary.uvw); + } else { + glm::mat4x3 quadPos; + for (const int i : {0, 1, 2, 3}) { + quadPos[i] = vertPos_[halfedge_[halfedges[i]].startVert]; + } + pos = quadPos * bary.uvw; + } + }); + vertPos_ = newVertPos; + + faceNormal_.resize(0); + + if (meshRelation_.numProp > 0) { + const int numPropVert = NumPropVert(); + const int addedVerts = NumVert() - numVert; + const int propOffset = numPropVert - numVert; + Vec prop(meshRelation_.numProp * + (numPropVert + addedVerts + totalEdgeAdded)); + + // copy retained prop verts + copy(policy, meshRelation_.properties.begin(), + meshRelation_.properties.end(), prop.begin()); + + // copy interior prop verts and forward edge prop verts + for_each_n( + policy, zip(countAt(numPropVert), vertBary.begin() + numVert), + addedVerts, + [this, &prop, &faceHalfedges](thrust::tuple in) { + const int vert = thrust::get<0>(in); + const Barycentric bary = thrust::get<1>(in); + const glm::ivec4 halfedges = faceHalfedges[bary.tri]; + auto& rel = meshRelation_; + + for (int p = 0; p < rel.numProp; ++p) { + if (halfedges[3] < 0) { + glm::vec3 triProp; + for (const int i : {0, 1, 2}) { + triProp[i] = rel.properties[rel.triProperties[bary.tri][i] * + rel.numProp + + p]; + } + prop[vert * rel.numProp + p] = + glm::dot(triProp, glm::vec3(bary.uvw)); + } else { + glm::vec4 quadProp; + for (const int i : {0, 1, 2, 3}) { + const int tri = halfedges[i] / 3; + const int j = halfedges[i] % 3; + quadProp[i] = + rel.properties[rel.triProperties[tri][j] * rel.numProp + p]; + } + prop[vert * rel.numProp + p] = glm::dot(quadProp, bary.uvw); + } + } + }); + + // copy backward edge prop verts + for_each_n( + policy, zip(edges.begin(), edgeAdded.begin(), edgeOffset.begin()), + numEdge, + [this, &prop, propOffset, + addedVerts](thrust::tuple in) { + const TmpEdge edge = thrust::get<0>(in); + const int n = thrust::get<1>(in); + const int offset = thrust::get<2>(in) + propOffset + addedVerts; + auto& rel = meshRelation_; + + const float frac = 1.0f / (n + 1); + const 
int halfedgeIdx = halfedge_[edge.halfedgeIdx].pairedHalfedge; + const int v0 = halfedgeIdx % 3; + const int tri = halfedgeIdx / 3; + const int prop0 = rel.triProperties[tri][v0]; + const int prop1 = rel.triProperties[tri][Next3(v0)]; + for (int i = 0; i < n; ++i) { + for (int p = 0; p < rel.numProp; ++p) { + prop[(offset + i) * rel.numProp + p] = glm::mix( + rel.properties[prop0 * rel.numProp + p], + rel.properties[prop1 * rel.numProp + p], (i + 1) * frac); + } + } + }); + + Vec triProp(triVerts.size()); + for_each_n(policy, countAt(0), numTri, + [this, &triProp, &subTris, &edgeOffset, &half2Edge, &triOffset, + &interiorOffset, &faceHalfedges, propOffset, + addedVerts](const int tri) { + const glm::ivec4 halfedges = faceHalfedges[tri]; + if (halfedges[0] < 0) return; + + auto& rel = meshRelation_; + glm::ivec4 tri3; + glm::ivec4 edgeOffsets; + glm::bvec4 edgeFwd(true); + for (const int i : {0, 1, 2, 3}) { + if (halfedges[i] < 0) { + tri3[i] = -1; + continue; + } + const int thisTri = halfedges[i] / 3; + const int j = halfedges[i] % 3; + const Halfedge& halfedge = halfedge_[halfedges[i]]; + tri3[i] = rel.triProperties[thisTri][j]; + edgeOffsets[i] = edgeOffset[half2Edge[halfedges[i]]]; + if (!halfedge.IsForward()) { + const int pairTri = halfedge.pairedHalfedge / 3; + const int k = halfedge.pairedHalfedge % 3; + if (rel.triProperties[pairTri][k] != + rel.triProperties[thisTri][Next3(j)] || + rel.triProperties[pairTri][Next3(k)] != + rel.triProperties[thisTri][j]) { + edgeOffsets[i] += addedVerts; + } else { + edgeFwd[i] = false; + } + } + } + + Vec newTris = subTris[tri].Reindex( + tri3, edgeOffsets + propOffset, edgeFwd, + interiorOffset[tri] + propOffset); + copy(ExecutionPolicy::Seq, newTris.begin(), newTris.end(), + triProp.begin() + triOffset[tri]); + }); + + meshRelation_.properties = prop; + meshRelation_.triProperties = triProp; + } + + CreateHalfedges(triVerts); + + return vertBary; +} + +} // namespace manifold diff --git a/thirdparty/manifold/src/polygon/include/polygon.h b/thirdparty/manifold/src/polygon/include/polygon.h new file mode 100644 index 000000000000..8e5f11f0bd0c --- /dev/null +++ b/thirdparty/manifold/src/polygon/include/polygon.h @@ -0,0 +1,51 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include + +#include "public.h" + +namespace manifold { + +/** @addtogroup Private + * @{ + */ + +/** + * Polygon vertex. 
+ */ +struct PolyVert { + /// X-Y position + glm::vec2 pos; + /// ID or index into another vertex vector + int idx; +}; + +using SimplePolygonIdx = std::vector; +using PolygonsIdx = std::vector; + +std::vector TriangulateIdx(const PolygonsIdx &polys, + float precision = -1); +/** @} */ + +/** @ingroup Connections + * @{ + */ +std::vector Triangulate(const Polygons &polygons, + float precision = -1); + +ExecutionParams &PolygonParams(); +/** @} */ +} // namespace manifold \ No newline at end of file diff --git a/thirdparty/manifold/src/polygon/src/polygon.cpp b/thirdparty/manifold/src/polygon/src/polygon.cpp new file mode 100644 index 000000000000..a8df49bf419c --- /dev/null +++ b/thirdparty/manifold/src/polygon/src/polygon.cpp @@ -0,0 +1,877 @@ +// Copyright 2021 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "polygon.h" + +#include +#include +#include + +#include "optional_assert.h" +#include "utils.h" + +namespace { +using namespace manifold; + +static ExecutionParams params; + +constexpr float kBest = -std::numeric_limits::infinity(); + +// it seems that MSVC cannot optimize glm::determinant(glm::mat2(a, b)) +constexpr float determinant2x2(glm::vec2 a, glm::vec2 b) { + return a.x * b.y - a.y * b.x; +} + +#ifdef MANIFOLD_DEBUG +struct PolyEdge { + int startVert, endVert; +}; + +std::vector Polygons2Edges(const PolygonsIdx &polys) { + std::vector halfedges; + for (const auto &poly : polys) { + for (int i = 1; i < poly.size(); ++i) { + halfedges.push_back({poly[i - 1].idx, poly[i].idx}); + } + halfedges.push_back({poly.back().idx, poly[0].idx}); + } + return halfedges; +} + +std::vector Triangles2Edges( + const std::vector &triangles) { + std::vector halfedges; + halfedges.reserve(triangles.size() * 3); + for (const glm::ivec3 &tri : triangles) { + halfedges.push_back({tri[0], tri[1]}); + halfedges.push_back({tri[1], tri[2]}); + halfedges.push_back({tri[2], tri[0]}); + } + return halfedges; +} + +void CheckTopology(const std::vector &halfedges) { + ASSERT(halfedges.size() % 2 == 0, topologyErr, "Odd number of halfedges."); + size_t n_edges = halfedges.size() / 2; + std::vector forward(halfedges.size()), backward(halfedges.size()); + + auto end = std::copy_if(halfedges.begin(), halfedges.end(), forward.begin(), + [](PolyEdge e) { return e.endVert > e.startVert; }); + ASSERT(std::distance(forward.begin(), end) == n_edges, topologyErr, + "Half of halfedges should be forward."); + forward.resize(n_edges); + + end = std::copy_if(halfedges.begin(), halfedges.end(), backward.begin(), + [](PolyEdge e) { return e.endVert < e.startVert; }); + ASSERT(std::distance(backward.begin(), end) == n_edges, topologyErr, + "Half of halfedges should be backward."); + backward.resize(n_edges); + + std::for_each(backward.begin(), backward.end(), + [](PolyEdge &e) { std::swap(e.startVert, e.endVert); }); + auto cmp = [](const PolyEdge &a, const PolyEdge &b) { + return a.startVert < b.startVert || + (a.startVert == b.startVert && a.endVert < b.endVert); + }; + 
std::stable_sort(forward.begin(), forward.end(), cmp); + std::stable_sort(backward.begin(), backward.end(), cmp); + for (int i = 0; i < n_edges; ++i) { + ASSERT(forward[i].startVert == backward[i].startVert && + forward[i].endVert == backward[i].endVert, + topologyErr, "Not manifold."); + } +} + +void CheckTopology(const std::vector &triangles, + const PolygonsIdx &polys) { + std::vector halfedges = Triangles2Edges(triangles); + std::vector openEdges = Polygons2Edges(polys); + for (PolyEdge e : openEdges) { + halfedges.push_back({e.endVert, e.startVert}); + } + CheckTopology(halfedges); +} + +void CheckGeometry(const std::vector &triangles, + const PolygonsIdx &polys, float precision) { + std::unordered_map vertPos; + for (const auto &poly : polys) { + for (int i = 0; i < poly.size(); ++i) { + vertPos[poly[i].idx] = poly[i].pos; + } + } + ASSERT(std::all_of(triangles.begin(), triangles.end(), + [&vertPos, precision](const glm::ivec3 &tri) { + return CCW(vertPos[tri[0]], vertPos[tri[1]], + vertPos[tri[2]], precision) >= 0; + }), + geometryErr, "triangulation is not entirely CCW!"); +} + +void Dump(const PolygonsIdx &polys) { + for (auto poly : polys) { + std::cout << "polys.push_back({" << std::setprecision(9) << std::endl; + for (auto v : poly) { + std::cout << " {" << v.pos.x << ", " << v.pos.y << "}, //" + << std::endl; + } + std::cout << "});" << std::endl; + } + for (auto poly : polys) { + std::cout << "show(array([" << std::endl; + for (auto v : poly) { + std::cout << " [" << v.pos.x << ", " << v.pos.y << "]," << std::endl; + } + std::cout << "]))" << std::endl; + } +} + +void PrintFailure(const std::exception &e, const PolygonsIdx &polys, + std::vector &triangles, float precision) { + std::cout << "-----------------------------------" << std::endl; + std::cout << "Triangulation failed! Precision = " << precision << std::endl; + std::cout << e.what() << std::endl; + Dump(polys); + std::cout << "produced this triangulation:" << std::endl; + for (int j = 0; j < triangles.size(); ++j) { + std::cout << triangles[j][0] << ", " << triangles[j][1] << ", " + << triangles[j][2] << std::endl; + } +} + +#define PRINT(msg) \ + if (params.verbose) std::cout << msg << std::endl; +#else +#define PRINT(msg) +#endif + +/** + * Ear-clipping triangulator based on David Eberly's approach from Geometric + * Tools, but adjusted to handle epsilon-valid polygons, and including a + * fallback that ensures a manifold triangulation even for overlapping polygons. + * This is an O(n^2) algorithm, but hopefully this is not a big problem as the + * number of edges in a given polygon is generally much less than the number of + * triangles in a mesh, and relatively few faces even need triangulation. + * + * The main adjustments for robustness involve clipping the sharpest ears first + * (a known technique to get higher triangle quality), and doing an exhaustive + * search to determine ear convexity exactly if the first geometric result is + * within precision. 
+ */ + +class EarClip { + public: + EarClip(const PolygonsIdx &polys, float precision) : precision_(precision) { + ZoneScoped; + + int numVert = 0; + for (const SimplePolygonIdx &poly : polys) { + numVert += poly.size(); + } + polygon_.reserve(numVert + 2 * polys.size()); + + std::vector starts = Initialize(polys); + + for (VertItr v = polygon_.begin(); v != polygon_.end(); ++v) { + ClipIfDegenerate(v); + } + + for (const VertItr first : starts) { + FindStart(first); + } + } + + std::vector Triangulate() { + ZoneScoped; + + for (const VertItr start : holes_) { + CutKeyhole(start); + } + + for (const VertItr start : simples_) { + TriangulatePoly(start); + } + + return triangles_; + } + + float GetPrecision() const { return precision_; } + + private: + struct Vert; + typedef std::vector::iterator VertItr; + struct MaxX { + bool operator()(const VertItr &a, const VertItr &b) const { + return a->pos.x > b->pos.x; + } + }; + struct MinCost { + bool operator()(const VertItr &a, const VertItr &b) const { + return a->cost < b->cost; + } + }; + typedef std::set::iterator qItr; + + // The flat list where all the Verts are stored. Not used much for traversal. + std::vector polygon_; + // The set of right-most starting points, one for each negative-area contour. + std::multiset holes_; + // The set of starting points, one for each positive-area contour. + std::vector outers_; + // The set of starting points, one for each simple polygon. + std::vector simples_; + // Maps each hole (by way of starting point) to its bounding box. + std::map hole2BBox_; + // A priority queue of valid ears - the multiset allows them to be updated. + std::multiset earsQueue_; + // The output triangulation. + std::vector triangles_; + // Working precision: max of float error and input value. + float precision_; + + // A circularly-linked list representing the polygon(s) that still need to be + // triangulated. This gets smaller as ears are clipped until it degenerates to + // two points and terminates. + struct Vert { + int mesh_idx; + float cost; + qItr ear; + glm::vec2 pos, rightDir; + VertItr left, right; + + // Shorter than half of precision, to be conservative so that it doesn't + // cause CW triangles that exceed precision due to rounding error. + bool IsShort(float precision) const { + const glm::vec2 edge = right->pos - pos; + return glm::dot(edge, edge) * 4 < precision * precision; + } + + // Like CCW, returns 1 if v is on the inside of the angle formed at this + // vert, -1 on the outside, and 0 if it's within precision of the boundary. + // Ensure v is more than precision from pos, as case this will not return 0. + int Interior(glm::vec2 v, float precision) const { + const glm::vec2 diff = v - pos; + if (glm::dot(diff, diff) < precision * precision) { + return 0; + } + return CCW(pos, left->pos, right->pos, precision) + + CCW(pos, right->pos, v, precision) + + CCW(pos, v, left->pos, precision); + } + + // Returns true if Vert is on the inside of the edge that goes from tail to + // tail->right. This will walk the edges if necessary until a clear answer + // is found (beyond precision). If toLeft is true, this Vert will walk its + // edges to the left. This should be chosen so that the edges walk in the + // same general direction - tail always walks to the right. 
+ bool InsideEdge(VertItr tail, float precision, bool toLeft) const { + const float p2 = precision * precision; + VertItr nextL = left->right; + VertItr nextR = tail->right; + VertItr center = tail; + VertItr last = center; + + while (nextL != nextR && tail != nextR && + nextL != (toLeft ? right : left)) { + const glm::vec2 edgeL = nextL->pos - center->pos; + const float l2 = glm::dot(edgeL, edgeL); + if (l2 <= p2) { + nextL = toLeft ? nextL->left : nextL->right; + continue; + } + + const glm::vec2 edgeR = nextR->pos - center->pos; + const float r2 = glm::dot(edgeR, edgeR); + if (r2 <= p2) { + nextR = nextR->right; + continue; + } + + const glm::vec2 vecLR = nextR->pos - nextL->pos; + const float lr2 = glm::dot(vecLR, vecLR); + if (lr2 <= p2) { + last = center; + center = nextL; + nextL = toLeft ? nextL->left : nextL->right; + if (nextL == nextR) break; + nextR = nextR->right; + continue; + } + + int convexity = CCW(nextL->pos, center->pos, nextR->pos, precision); + if (center != last) { + convexity += CCW(last->pos, center->pos, nextL->pos, precision) + + CCW(nextR->pos, center->pos, last->pos, precision); + } + if (convexity != 0) return convexity > 0; + + if (l2 < r2) { + center = nextL; + nextL = toLeft ? nextL->left : nextL->right; + } else { + center = nextR; + nextR = nextR->right; + } + last = center; + } + // The whole polygon is degenerate - consider this to be convex. + return true; + } + + // A major key to robustness is to only clip convex ears, but this is + // difficult to determine when an edge is folded back on itself. This + // function walks down the kinks in a degenerate portion of a polygon until + // it finds a clear geometric result. In the vast majority of cases the loop + // will only need one or two iterations. + bool IsConvex(float precision) const { + return left->InsideEdge(left->right, precision, true); + } + + // This function is the core of finding a proper place to keyhole. It runs + // on this Vert, which represents the edge from this to right. It returns + // an iterator to the vert to connect to (either this or right) and a bool + // denoting if the edge is a valid option for a keyhole (must be upwards and + // cross the start.y-value). + // + // If the edge terminates within the precision band, it checks the next edge + // to ensure validity. No while loop is necessary because short edges have + // already been removed. The onTop value is 1 if the start.y-value is at the + // top of the polygon's bounding box, -1 if it's at the bottom, and 0 + // otherwise. This allows proper handling of horizontal edges. + std::pair InterpY2X(glm::vec2 start, int onTop, + float precision) const { + const auto none = std::make_pair(left, false); + if (pos.y < start.y && right->pos.y >= start.y) { + return std::make_pair(left->right, true); + } else if (pos.x > start.x - precision && pos.y > start.y - precision && + pos.y < start.y + precision && + Interior(start, precision) >= 0) { + if (onTop > 0 && left->pos.x < pos.x && + left->pos.y > start.y - precision) { + return none; + } + if (onTop < 0 && right->pos.x < pos.x && + right->pos.y < start.y + precision) { + return none; + } + const VertItr p = pos.x < right->pos.x ? right : left->right; + return std::make_pair(p, true); + } + // Edge does not cross start.y going up + return none; + } + + // This finds the cost of this vert relative to one of the two closed sides + // of the ear. Points are valid even when they touch, so long as their edge + // goes to the outside. 
No need to check the other side, since all verts are + // processed in the EarCost loop. + float SignedDist(VertItr v, glm::vec2 unit, float precision) const { + float d = determinant2x2(unit, v->pos - pos); + if (std::abs(d) < precision) { + d = glm::max(d, determinant2x2(unit, v->right->pos - pos)); + d = glm::max(d, determinant2x2(unit, v->left->pos - pos)); + } + return d; + } + + // Find the cost of Vert v within this ear, where openSide is the unit + // vector from Verts right to left - passed in for reuse. + float Cost(VertItr v, glm::vec2 openSide, float precision) const { + float cost = glm::min(SignedDist(v, rightDir, precision), + SignedDist(v, left->rightDir, precision)); + + const float openCost = determinant2x2(openSide, v->pos - right->pos); + return glm::min(cost, openCost); + } + + // For verts outside the ear, apply a cost based on the Delaunay condition + // to aid in prioritization and produce cleaner triangulations. This doesn't + // affect robustness, but may be adjusted to improve output. + float DelaunayCost(glm::vec2 diff, float scale, float precision) const { + return -precision - scale * glm::dot(diff, diff); + } + + // This is the O(n^2) part of the algorithm, checking this ear against every + // Vert to ensure none are inside. It may be possible to improve performance + // by using the Collider to get it down to nlogn or doing some + // parallelization, but that may be more trouble than it's worth. + // + // Think of a cost as vaguely a distance metric - 0 is right on the edge of + // being invalid. cost > precision is definitely invalid. Cost < -precision + // is definitely valid, so all improvement costs are designed to always give + // values < -precision so they will never affect validity. The first + // totalCost is designed to give priority to sharper angles. Any cost < (-1 + // - precision) has satisfied the Delaunay condition. + float EarCost(float precision) const { + glm::vec2 openSide = left->pos - right->pos; + const glm::vec2 center = 0.5f * (left->pos + right->pos); + const float scale = 4 / glm::dot(openSide, openSide); + openSide = glm::normalize(openSide); + + float totalCost = glm::dot(left->rightDir, rightDir) - 1 - precision; + if (CCW(pos, left->pos, right->pos, precision) == 0) { + // Clip folded ears first + return totalCost < -1 ? kBest : 0; + } + VertItr test = right->right; + auto lid = left->mesh_idx; + auto rid = right->mesh_idx; + while (test != left) { + if (test->mesh_idx != mesh_idx && test->mesh_idx != lid && + test->mesh_idx != rid) { // Skip duplicated verts + float cost = Cost(test, openSide, precision); + if (cost < -precision) { + cost = DelaunayCost(test->pos - center, scale, precision); + } + totalCost = glm::max(totalCost, cost); + } + + test = test->right; + } + return totalCost; + } + + void PrintVert() const { +#ifdef MANIFOLD_DEBUG + if (!params.verbose) return; + std::cout << "vert: " << mesh_idx << ", left: " << left->mesh_idx + << ", right: " << right->mesh_idx << ", cost: " << cost + << std::endl; +#endif + } + }; + + glm::vec2 SafeNormalize(glm::vec2 v) const { + glm::vec2 n = glm::normalize(v); + return glm::isfinite(n.x) ? n : glm::vec2(0, 0); + } + + // This function and JoinPolygons are the only functions that affect the + // circular list data structure. This helps ensure it remains circular. 
+ void Link(VertItr left, VertItr right) const { + left->right = right; + right->left = left; + left->rightDir = SafeNormalize(right->pos - left->pos); + } + + // When an ear vert is clipped, its neighbors get linked, so they get unlinked + // from it, but it is still linked to them. + bool Clipped(VertItr v) const { return v->right->left != v; } + + // Apply func to each un-clipped vert in a polygon and return an un-clipped + // vert. + VertItr Loop(VertItr first, std::function func) { + VertItr v = first; + do { + if (Clipped(v)) { + // Update first to an un-clipped vert so we will return to it instead + // of infinite-looping. + first = v->right->left; + if (!Clipped(first)) { + v = first; + if (v->right == v->left) { + return polygon_.end(); + } + func(v); + } + } else { + if (v->right == v->left) { + return polygon_.end(); + } + func(v); + } + v = v->right; + } while (v != first); + return v; + } + + // Remove this vert from the circular list and output a corresponding + // triangle. + void ClipEar(VertItr ear) { + Link(ear->left, ear->right); + if (ear->left->mesh_idx != ear->mesh_idx && + ear->mesh_idx != ear->right->mesh_idx && + ear->right->mesh_idx != ear->left->mesh_idx) { + // Filter out topological degenerates, which can form in bad + // triangulations of polygons with holes, due to vert duplication. + triangles_.push_back( + {ear->left->mesh_idx, ear->mesh_idx, ear->right->mesh_idx}); +#ifdef MANIFOLD_DEBUG + if (params.verbose) { + std::cout << "output tri: " << ear->mesh_idx << ", " + << ear->right->mesh_idx << ", " << ear->left->mesh_idx + << std::endl; + } +#endif + } else { + PRINT("Topological degenerate!"); + } + } + + // If an ear will make a degenerate triangle, clip it early to avoid + // difficulty in key-holing. This function is recursive, as the process of + // clipping may cause the neighbors to degenerate. Reflex degenerates *must + // not* be clipped, unless they have a short edge. + void ClipIfDegenerate(VertItr ear) { + if (Clipped(ear)) { + return; + } + if (ear->left == ear->right) { + return; + } + if (ear->IsShort(precision_) || + (CCW(ear->left->pos, ear->pos, ear->right->pos, precision_) == 0 && + glm::dot(ear->left->pos - ear->pos, ear->right->pos - ear->pos) > 0 && + ear->IsConvex(precision_))) { + ClipEar(ear); + ClipIfDegenerate(ear->left); + ClipIfDegenerate(ear->right); + } + } + + // Build the circular list polygon structures. + std::vector Initialize(const PolygonsIdx &polys) { + std::vector starts; + float bound = 0; + for (const SimplePolygonIdx &poly : polys) { + auto vert = poly.begin(); + polygon_.push_back({vert->idx, 0.0f, earsQueue_.end(), vert->pos}); + const VertItr first = std::prev(polygon_.end()); + + bound = glm::max( + bound, glm::max(std::abs(first->pos.x), std::abs(first->pos.y))); + VertItr last = first; + // This is not the real rightmost start, but just an arbitrary vert for + // now to identify each polygon. + starts.push_back(first); + + for (++vert; vert != poly.end(); ++vert) { + bound = glm::max( + bound, glm::max(std::abs(vert->pos.x), std::abs(vert->pos.y))); + + polygon_.push_back({vert->idx, 0.0f, earsQueue_.end(), vert->pos}); + VertItr next = std::prev(polygon_.end()); + + Link(last, next); + last = next; + } + Link(last, first); + } + + if (precision_ < 0) precision_ = bound * kTolerance; + + // Slightly more than enough, since each hole can cause two extra triangles. + triangles_.reserve(polygon_.size() + 2 * starts.size()); + return starts; + } + + // Find the actual rightmost starts after degenerate removal. 
Also calculate + // the polygon bounding boxes. + void FindStart(VertItr first) { + const glm::vec2 origin = first->pos; + + VertItr start = first; + float maxX = -std::numeric_limits::infinity(); + Rect bBox; + // Kahan summation + double area = 0; + double areaCompensation = 0; + + auto AddPoint = [&](VertItr v) { + bBox.Union(v->pos); + const double area1 = + determinant2x2(v->pos - origin, v->right->pos - origin); + const double t1 = area + area1; + areaCompensation += (area - t1) + area1; + area = t1; + + if (!v->IsConvex(precision_) && v->pos.x > maxX) { + maxX = v->pos.x; + start = v; + } + }; + + if (Loop(first, AddPoint) == polygon_.end()) { + // No polygon left if all ears were degenerate and already clipped. + return; + } + + area += areaCompensation; + const glm::vec2 size = bBox.Size(); + const float minArea = precision_ * glm::max(size.x, size.y); + + if (glm::isfinite(maxX) && area < -minArea) { + holes_.insert(start); + hole2BBox_.insert({start, bBox}); + } else { + simples_.push_back(start); + if (area > minArea) { + outers_.push_back(start); + } + } + } + + // All holes must be key-holed (attached to an outer polygon) before ear + // clipping can commence. Instead of relying on sorting, which may be + // incorrect due to precision, we check for polygon edges both ahead and + // behind to ensure all valid options are found. + void CutKeyhole(const VertItr start) { + const Rect bBox = hole2BBox_[start]; + const int onTop = start->pos.y >= bBox.max.y - precision_ ? 1 + : start->pos.y <= bBox.min.y + precision_ ? -1 + : 0; + VertItr connector = polygon_.end(); + + auto CheckEdge = [&](VertItr edge) { + const std::pair pair = + edge->InterpY2X(start->pos, onTop, precision_); + if (pair.second && start->InsideEdge(pair.first, precision_, true) && + (connector == polygon_.end() || + (connector->pos.y < pair.first->pos.y + ? pair.first->InsideEdge(connector, precision_, false) + : !connector->InsideEdge(pair.first, precision_, false)))) { + connector = pair.first; + } + }; + + for (const VertItr first : outers_) { + Loop(first, CheckEdge); + } + + if (connector == polygon_.end()) { + PRINT("hole did not find an outer contour!"); + simples_.push_back(start); + return; + } + + connector = FindCloserBridge(start, connector, onTop); + + JoinPolygons(start, connector); + +#ifdef MANIFOLD_DEBUG + if (params.verbose) { + std::cout << "connected " << start->mesh_idx << " to " + << connector->mesh_idx << std::endl; + } +#endif + } + + // This converts the initial guess for the keyhole location into the final one + // and returns it. It does so by finding any reflex verts inside the triangle + // containing the best connection and the initial horizontal line. + VertItr FindCloserBridge(VertItr start, VertItr edge, int onTop) { + VertItr best = edge->pos.x > edge->right->pos.x ? edge : edge->right; + const float maxX = best->pos.x; + const float above = best->pos.y > start->pos.y ? 
1 : -1; + + auto CheckVert = [&](VertItr vert) { + const float inside = above * CCW(start->pos, vert->pos, best->pos, 0); + if (vert->pos.x > start->pos.x - precision_ && + vert->pos.x < maxX + precision_ && + vert->pos.y * above > start->pos.y * above - precision_ && + (inside > 0 || (inside == 0 && vert->pos.x < best->pos.x)) && + vert->InsideEdge(edge, precision_, true) && + !vert->IsConvex(precision_)) { + if (vert->pos.y > start->pos.y - precision_ && + vert->pos.y < start->pos.y + precision_) { + if (onTop > 0 && vert->left->pos.x < vert->pos.x && + vert->left->pos.y > start->pos.y - precision_) { + return; + } + if (onTop < 0 && vert->right->pos.x < vert->pos.x && + vert->right->pos.y < start->pos.y + precision_) { + return; + } + } + best = vert; + } + }; + + for (const VertItr first : outers_) { + Loop(first, CheckVert); + } + + return best; + } + + // Creates a keyhole between the start vert of a hole and the connector vert + // of an outer polygon. To do this, both verts are duplicated and reattached. + // This process may create degenerate ears, so these are clipped if necessary + // to keep from confusing subsequent key-holing operations. + void JoinPolygons(VertItr start, VertItr connector) { + polygon_.push_back(*start); + const VertItr newStart = std::prev(polygon_.end()); + polygon_.push_back(*connector); + const VertItr newConnector = std::prev(polygon_.end()); + + start->right->left = newStart; + connector->left->right = newConnector; + Link(start, connector); + Link(newConnector, newStart); + + ClipIfDegenerate(start); + ClipIfDegenerate(newStart); + ClipIfDegenerate(connector); + ClipIfDegenerate(newConnector); + } + + // Recalculate the cost of the Vert v ear, updating it in the queue by + // removing and reinserting it. + void ProcessEar(VertItr v) { + if (v->ear != earsQueue_.end()) { + earsQueue_.erase(v->ear); + v->ear = earsQueue_.end(); + } + if (v->IsShort(precision_)) { + v->cost = kBest; + v->ear = earsQueue_.insert(v); + } else if (v->IsConvex(precision_)) { + v->cost = v->EarCost(precision_); + v->ear = earsQueue_.insert(v); + } + } + + // The main ear-clipping loop. This is called once for each simple polygon - + // all holes have already been key-holed and joined to an outer polygon. + void TriangulatePoly(VertItr start) { + ZoneScoped; + + // A simple polygon always creates two fewer triangles than it has verts. + int numTri = -2; + earsQueue_.clear(); + + auto QueueVert = [&](VertItr v) { + ProcessEar(v); + ++numTri; + v->PrintVert(); + }; + + VertItr v = Loop(start, QueueVert); + if (v == polygon_.end()) return; + Dump(v); + + while (numTri > 0) { + const qItr ear = earsQueue_.begin(); + if (ear != earsQueue_.end()) { + v = *ear; + // Cost should always be negative, generally < -precision. + v->PrintVert(); + earsQueue_.erase(ear); + } else { + PRINT("No ear found!"); + } + + ClipEar(v); + --numTri; + + ProcessEar(v->left); + ProcessEar(v->right); + // This is a backup vert that is used if the queue is empty (geometrically + // invalid polygon), to ensure manifoldness. 
+ v = v->right; + } + + ASSERT(v->right == v->left, logicErr, "Triangulator error!"); + PRINT("Finished poly"); + } + + void Dump(VertItr start) const { +#ifdef MANIFOLD_DEBUG + if (!params.verbose) return; + VertItr v = start; + std::cout << "show(array([" << std::endl; + do { + std::cout << " [" << v->pos.x << ", " << v->pos.y << "],# " + << v->mesh_idx << ", cost: " << v->cost << std::endl; + v = v->right; + } while (v != start); + std::cout << " [" << v->pos.x << ", " << v->pos.y << "],# " << v->mesh_idx + << std::endl; + std::cout << "]))" << std::endl; +#endif + } +}; +} // namespace + +namespace manifold { + +/** + * @brief Triangulates a set of ε-valid polygons. If the input is not + * ε-valid, the triangulation may overlap, but will always return a + * manifold result that matches the input edge directions. + * + * @param polys The set of polygons, wound CCW and representing multiple + * polygons and/or holes. These have 2D-projected positions as well as + * references back to the original vertices. + * @param precision The value of ε, bounding the uncertainty of the + * input. + * @return std::vector The triangles, referencing the original + * vertex indicies. + */ +std::vector TriangulateIdx(const PolygonsIdx &polys, + float precision) { + std::vector triangles; + try { + EarClip triangulator(polys, precision); + triangles = triangulator.Triangulate(); +#ifdef MANIFOLD_DEBUG + if (params.intermediateChecks) { + CheckTopology(triangles, polys); + if (!params.processOverlaps) { + CheckGeometry(triangles, polys, 2 * triangulator.GetPrecision()); + } + } + } catch (const geometryErr &e) { + if (!params.suppressErrors) { + PrintFailure(e, polys, triangles, precision); + } + throw; + } catch (const std::exception &e) { + PrintFailure(e, polys, triangles, precision); + throw; +#else + } catch (const std::exception &e) { +#endif + } + return triangles; +} + +/** + * @brief Triangulates a set of ε-valid polygons. If the input is not + * ε-valid, the triangulation may overlap, but will always return a + * manifold result that matches the input edge directions. + * + * @param polygons The set of polygons, wound CCW and representing multiple + * polygons and/or holes. + * @param precision The value of ε, bounding the uncertainty of the + * input. + * @return std::vector The triangles, referencing the original + * polygon points in order. + */ +std::vector Triangulate(const Polygons &polygons, float precision) { + int idx = 0; + PolygonsIdx polygonsIndexed; + for (const auto &poly : polygons) { + SimplePolygonIdx simpleIndexed; + for (const glm::vec2 &polyVert : poly) { + simpleIndexed.push_back({polyVert, idx++}); + } + polygonsIndexed.push_back(simpleIndexed); + } + return TriangulateIdx(polygonsIndexed, precision); +} + +ExecutionParams &PolygonParams() { return params; } + +} // namespace manifold diff --git a/thirdparty/manifold/src/sdf/include/sdf.h b/thirdparty/manifold/src/sdf/include/sdf.h new file mode 100644 index 000000000000..d65afe803dac --- /dev/null +++ b/thirdparty/manifold/src/sdf/include/sdf.h @@ -0,0 +1,24 @@ +// Copyright 2023 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <functional>
+
+#include "public.h"
+
+namespace manifold {
+Mesh LevelSet(std::function<float(glm::vec3)> sdf, Box bounds, float edgeLength,
+              float level = 0, bool canParallel = true);
+}
diff --git a/thirdparty/manifold/src/sdf/src/sdf.cpp b/thirdparty/manifold/src/sdf/src/sdf.cpp
new file mode 100644
index 000000000000..eaabe1f90a09
--- /dev/null
+++ b/thirdparty/manifold/src/sdf/src/sdf.cpp
@@ -0,0 +1,362 @@
+// Copyright 2023 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "sdf.h"
+
+#include "hashtable.h"
+#include "par.h"
+#include "utils.h"
+#include "vec.h"
+
+namespace {
+using namespace manifold;
+Uint64 identity(Uint64 x) { return x; }
+
+glm::ivec3 TetTri0(int i) {
+  constexpr glm::ivec3 tetTri0[16] = {{-1, -1, -1},  //
+                                      {0, 3, 4},     //
+                                      {0, 1, 5},     //
+                                      {1, 5, 3},     //
+                                      {1, 4, 2},     //
+                                      {1, 0, 3},     //
+                                      {2, 5, 0},     //
+                                      {5, 3, 2},     //
+                                      {2, 3, 5},     //
+                                      {0, 5, 2},     //
+                                      {3, 0, 1},     //
+                                      {2, 4, 1},     //
+                                      {3, 5, 1},     //
+                                      {5, 1, 0},     //
+                                      {4, 3, 0},     //
+                                      {-1, -1, -1}};
+  return tetTri0[i];
+}
+
+glm::ivec3 TetTri1(int i) {
+  constexpr glm::ivec3 tetTri1[16] = {{-1, -1, -1},  //
+                                      {-1, -1, -1},  //
+                                      {-1, -1, -1},  //
+                                      {3, 4, 1},     //
+                                      {-1, -1, -1},  //
+                                      {3, 2, 1},     //
+                                      {0, 4, 2},     //
+                                      {-1, -1, -1},  //
+                                      {-1, -1, -1},  //
+                                      {2, 4, 0},     //
+                                      {1, 2, 3},     //
+                                      {-1, -1, -1},  //
+                                      {1, 4, 3},     //
+                                      {-1, -1, -1},  //
+                                      {-1, -1, -1},  //
+                                      {-1, -1, -1}};
+  return tetTri1[i];
+}
+
+glm::ivec4 Neighbors(int i) {
+  constexpr glm::ivec4 neighbors[7] = {{0, 0, 0, 1},   //
+                                       {1, 0, 0, 0},   //
+                                       {0, 1, 0, 0},   //
+                                       {0, 0, 1, 0},   //
+                                       {-1, 0, 0, 1},  //
+                                       {0, -1, 0, 1},  //
+                                       {0, 0, -1, 1}};
+  return neighbors[i];
+}
+
+Uint64 SpreadBits3(Uint64 v) {
+  v = v & 0x1fffff;
+  v = (v | v << 32) & 0x1f00000000ffff;
+  v = (v | v << 16) & 0x1f0000ff0000ff;
+  v = (v | v << 8) & 0x100f00f00f00f00f;
+  v = (v | v << 4) & 0x10c30c30c30c30c3;
+  v = (v | v << 2) & 0x1249249249249249;
+  return v;
+}
+
+Uint64 SqueezeBits3(Uint64 v) {
+  v = v & 0x1249249249249249;
+  v = (v ^ v >> 2) & 0x10c30c30c30c30c3;
+  v = (v ^ v >> 4) & 0x100f00f00f00f00f;
+  v = (v ^ v >> 8) & 0x1f0000ff0000ff;
+  v = (v ^ v >> 16) & 0x1f00000000ffff;
+  v = (v ^ v >> 32) & 0x1fffff;
+  return v;
+}
+
+// This is a modified 3D MortonCode, where the xyz code is shifted by one bit
+// and the w bit is added as the least significant. This allows 21 bits per x,
+// y, and z channel and 1 for w, filling the 64 bit total.
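+// Illustrative example (not part of the upstream sources): for
+// index = (x=3, y=1, z=0, w=1), SpreadBits3(3) = 0b1001, so the code is
+// (0b1001 << 1) | (0b1 << 2) | (0 << 3) | 1 = 0b10111 = 23, and
+// DecodeMorton(23) recovers (3, 1, 0, 1).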
+Uint64 MortonCode(const glm::ivec4& index) {
+  return static_cast<Uint64>(index.w) | (SpreadBits3(index.x) << 1) |
+         (SpreadBits3(index.y) << 2) | (SpreadBits3(index.z) << 3);
+}
+
+glm::ivec4 DecodeMorton(Uint64 code) {
+  glm::ivec4 index;
+  index.x = SqueezeBits3(code >> 1);
+  index.y = SqueezeBits3(code >> 2);
+  index.z = SqueezeBits3(code >> 3);
+  index.w = code & 0x1u;
+  return index;
+}
+
+struct GridVert {
+  float distance = NAN;
+  int edgeVerts[7] = {-1, -1, -1, -1, -1, -1, -1};
+
+  int Inside() const { return distance > 0 ? 1 : -1; }
+
+  int NeighborInside(int i) const {
+    return Inside() * (edgeVerts[i] < 0 ? 1 : -1);
+  }
+};
+
+struct ComputeVerts {
+  VecView<glm::vec3> vertPos;
+  VecView<int> vertIndex;
+  HashTableD<GridVert> gridVerts;
+  const std::function<float(glm::vec3)> sdf;
+  const glm::vec3 origin;
+  const glm::ivec3 gridSize;
+  const glm::vec3 spacing;
+  const float level;
+
+  inline glm::vec3 Position(glm::ivec4 gridIndex) const {
+    return origin +
+           spacing * (glm::vec3(gridIndex) + (gridIndex.w == 1 ? 0.0f : -0.5f));
+  }
+
+  inline float BoundedSDF(glm::ivec4 gridIndex) const {
+    const float d = sdf(Position(gridIndex)) - level;
+
+    const glm::ivec3 xyz(gridIndex);
+    const bool onLowerBound = glm::any(glm::lessThanEqual(xyz, glm::ivec3(0)));
+    const bool onUpperBound = glm::any(glm::greaterThanEqual(xyz, gridSize));
+    const bool onHalfBound =
+        gridIndex.w == 1 && glm::any(glm::greaterThanEqual(xyz, gridSize - 1));
+    if (onLowerBound || onUpperBound || onHalfBound) return glm::min(d, 0.0f);
+
+    return d;
+  }
+
+  inline void operator()(Uint64 mortonCode) {
+    ZoneScoped;
+    if (gridVerts.Full()) return;
+
+    const glm::ivec4 gridIndex = DecodeMorton(mortonCode);
+
+    if (glm::any(glm::greaterThan(glm::ivec3(gridIndex), gridSize))) return;
+
+    const glm::vec3 position = Position(gridIndex);
+
+    GridVert gridVert;
+    gridVert.distance = BoundedSDF(gridIndex);
+
+    bool keep = false;
+    // These seven edges are uniquely owned by this gridVert; any of them
+    // which intersect the surface create a vert.
+    for (int i = 0; i < 7; ++i) {
+      glm::ivec4 neighborIndex = gridIndex + Neighbors(i);
+      if (neighborIndex.w == 2) {
+        neighborIndex += 1;
+        neighborIndex.w = 0;
+      }
+      const float val = BoundedSDF(neighborIndex);
+      if ((val > 0) == (gridVert.distance > 0)) continue;
+      keep = true;
+
+      const int idx = AtomicAdd(vertIndex[0], 1);
+      vertPos[idx] =
+          (val * position - gridVert.distance * Position(neighborIndex)) /
+          (val - gridVert.distance);
+      gridVert.edgeVerts[i] = idx;
+    }
+
+    if (keep) gridVerts.Insert(mortonCode, gridVert);
+  }
+};
+
+struct BuildTris {
+  VecView<glm::ivec3> triVerts;
+  VecView<int> triIndex;
+  const HashTableD<GridVert> gridVerts;
+
+  void CreateTri(const glm::ivec3& tri, const int edges[6]) {
+    if (tri[0] < 0) return;
+    int idx = AtomicAdd(triIndex[0], 1);
+    triVerts[idx] = {edges[tri[0]], edges[tri[1]], edges[tri[2]]};
+  }
+
+  void CreateTris(const glm::ivec4& tet, const int edges[6]) {
+    const int i = (tet[0] > 0 ? 1 : 0) + (tet[1] > 0 ? 2 : 0) +
+                  (tet[2] > 0 ? 4 : 0) + (tet[3] > 0 ? 8 : 0);
+    CreateTri(TetTri0(i), edges);
+    CreateTri(TetTri1(i), edges);
+  }
+
+  void operator()(int idx) {
+    ZoneScoped;
+    Uint64 basekey = gridVerts.KeyAt(idx);
+    if (basekey == kOpen) return;
+
+    const GridVert& base = gridVerts.At(idx);
+    const glm::ivec4 baseIndex = DecodeMorton(basekey);
+
+    glm::ivec4 leadIndex = baseIndex;
+    if (leadIndex.w == 0)
+      leadIndex.w = 1;
+    else {
+      leadIndex += 1;
+      leadIndex.w = 0;
+    }
+
+    // This GridVert is in charge of the 6 tetrahedra surrounding its edge in
+    // the (1,1,1) direction (edge 0).
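+    // (Illustrative note, not upstream text: the loop below builds those six
+    // tetrahedra in pairs, two per axis i, gathering the neighboring grid
+    // verts via Prev3/Next3.)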
+    glm::ivec4 tet(base.NeighborInside(0), base.Inside(), -2, -2);
+    glm::ivec4 thisIndex = baseIndex;
+    thisIndex.x += 1;
+
+    GridVert thisVert = gridVerts[MortonCode(thisIndex)];
+
+    tet[2] = base.NeighborInside(1);
+    for (const int i : {0, 1, 2}) {
+      thisIndex = leadIndex;
+      --thisIndex[Prev3(i)];
+      // MortonCodes take unsigned input, so check for negatives, given the
+      // decrement.
+      GridVert nextVert = thisIndex[Prev3(i)] < 0
+                              ? GridVert()
+                              : gridVerts[MortonCode(thisIndex)];
+      tet[3] = base.NeighborInside(Prev3(i) + 4);
+
+      const int edges1[6] = {base.edgeVerts[0],
+                             base.edgeVerts[i + 1],
+                             nextVert.edgeVerts[Next3(i) + 4],
+                             nextVert.edgeVerts[Prev3(i) + 1],
+                             thisVert.edgeVerts[i + 4],
+                             base.edgeVerts[Prev3(i) + 4]};
+      thisVert = nextVert;
+      CreateTris(tet, edges1);
+
+      thisIndex = baseIndex;
+      ++thisIndex[Next3(i)];
+      nextVert = gridVerts[MortonCode(thisIndex)];
+      tet[2] = tet[3];
+      tet[3] = base.NeighborInside(Next3(i) + 1);
+
+      const int edges2[6] = {base.edgeVerts[0],
+                             edges1[5],
+                             thisVert.edgeVerts[i + 4],
+                             nextVert.edgeVerts[Next3(i) + 4],
+                             edges1[3],
+                             base.edgeVerts[Next3(i) + 1]};
+      thisVert = nextVert;
+      CreateTris(tet, edges2);
+
+      tet[2] = tet[3];
+    }
+  }
+};
+}  // namespace
+
+namespace manifold {
+
+/** @addtogroup Core
+ *  @{
+ */
+
+/**
+ * Constructs a level-set Mesh from the input Signed-Distance Function (SDF).
+ * This uses a form of Marching Tetrahedra (akin to Marching Cubes, but better
+ * for manifoldness). Instead of using a cubic grid, it uses a body-centered
+ * cubic grid (two shifted cubic grids). This means if your function's interior
+ * exceeds the given bounds, you will see a kind of egg-crate shape closing off
+ * the manifold, which is due to the underlying grid.
+ *
+ * @param sdf The signed-distance functor, containing this function signature:
+ * `float operator()(glm::vec3 point)`, which returns the
+ * signed distance of a given point in R^3. Positive values are inside,
+ * negative outside.
+ * @param bounds An axis-aligned box that defines the extent of the grid.
+ * @param edgeLength Approximate maximum edge length of the triangles in the
+ * final result. This affects grid spacing, and hence has a strong effect on
+ * performance.
+ * @param level You can inset your Mesh by using a positive value, or outset
+ * it with a negative value.
+ * @param canParallel Parallel policies can crash language runtimes that use
+ * runtime locks and expect not to be called back by unregistered threads.
+ * Passing false forces sequential execution, so bindings can use LevelSet
+ * even when compiled with MANIFOLD_PAR active.
+ * @return Mesh This class does not depend on Manifold, so it just returns a
+ * Mesh, but it is guaranteed to be manifold and so can always be used as
+ * input to the Manifold constructor for further operations.
+ */
+Mesh LevelSet(std::function<float(glm::vec3)> sdf, Box bounds, float edgeLength,
+              float level, bool canParallel) {
+  Mesh out;
+
+  const glm::vec3 dim = bounds.Size();
+  const glm::ivec3 gridSize(dim / edgeLength);
+  const glm::vec3 spacing = dim / (glm::vec3(gridSize));
+
+  const Uint64 maxMorton = MortonCode(glm::ivec4(gridSize + 1, 1));
+
+  // Parallel policies can crash language runtimes that use runtime locks and
+  // expect not to be called back by unregistered threads, so fall back to
+  // sequential execution when the caller cannot tolerate parallelism.
+  const auto pol = canParallel ? autoPolicy(maxMorton) : ExecutionPolicy::Seq;
+
+  size_t tableSize = glm::min(
+      2 * maxMorton, static_cast<Uint64>(10 * glm::pow(maxMorton, 0.667)));
+  HashTable<GridVert> gridVerts(tableSize);
+  Vec<glm::vec3> vertPos(gridVerts.Size() * 7);
+
+  while (1) {
+    Vec<int> index(1, 0);
+    for_each_n(pol, countAt(0_z), maxMorton + 1,
+               ComputeVerts({vertPos, index, gridVerts.D(), sdf, bounds.min,
+                             gridSize + 1, spacing, level}));
+
+    if (gridVerts.Full()) {  // Resize HashTable
+      const glm::vec3 lastVert = vertPos[index[0] - 1];
+      const Uint64 lastMorton =
+          MortonCode(glm::ivec4((lastVert - bounds.min) / spacing, 1));
+      const float ratio = static_cast<float>(maxMorton) / lastMorton;
+
+      if (ratio > 1000)  // do not trust the ratio if it is too large
+        tableSize *= 2;
+      else
+        tableSize *= ratio;
+      gridVerts = HashTable<GridVert>(tableSize);
+      vertPos = Vec<glm::vec3>(gridVerts.Size() * 7);
+    } else {  // Success
+      vertPos.resize(index[0]);
+      break;
+    }
+  }
+
+  Vec<glm::ivec3> triVerts(gridVerts.Entries() * 12);  // worst case
+
+  Vec<int> index(1, 0);
+  for_each_n(pol, countAt(0_z), gridVerts.Size(),
+             BuildTris({triVerts, index, gridVerts.D()}));
+  triVerts.resize(index[0]);
+
+  out.vertPos.insert(out.vertPos.end(), vertPos.begin(), vertPos.end());
+  out.triVerts.insert(out.triVerts.end(), triVerts.begin(), triVerts.end());
+  return out;
+}
+/** @} */
+}  // namespace manifold
diff --git a/thirdparty/manifold/src/third_party/quickhull/.gitignore b/thirdparty/manifold/src/third_party/quickhull/.gitignore
new file mode 100644
index 000000000000..496ee2ca6a2f
--- /dev/null
+++ b/thirdparty/manifold/src/third_party/quickhull/.gitignore
@@ -0,0 +1 @@
+.DS_Store
\ No newline at end of file
diff --git a/thirdparty/manifold/src/third_party/quickhull/.gitrepo b/thirdparty/manifold/src/third_party/quickhull/.gitrepo
new file mode 100644
index 000000000000..d54da571cc74
--- /dev/null
+++ b/thirdparty/manifold/src/third_party/quickhull/.gitrepo
@@ -0,0 +1,12 @@
+; DO NOT EDIT (unless you know what you are doing)
+;
+; This subdirectory is a git "subrepo", and this file is maintained by the
+; git-subrepo command. See https://github.com/ingydotnet/git-subrepo#readme
+;
+[subrepo]
+	remote = https://github.com/akuukka/quickhull.git
+	branch = 1ffbc6f884ea1da89e104a5996cf8a726db673d5
+	commit = 1ffbc6f884ea1da89e104a5996cf8a726db673d5
+	parent = b8b715dff0615a5e85dc956d514bcf680efa61fa
+	method = merge
+	cmdver = 0.4.6
diff --git a/thirdparty/manifold/src/third_party/quickhull/ConvexHull.hpp b/thirdparty/manifold/src/third_party/quickhull/ConvexHull.hpp
new file mode 100644
index 000000000000..71b2ee1114f3
--- /dev/null
+++ b/thirdparty/manifold/src/third_party/quickhull/ConvexHull.hpp
@@ -0,0 +1,182 @@
+#ifndef CONVEXHULL_HPP_
+#define CONVEXHULL_HPP_
+
+#include "Structs/Vector3.hpp"
+#include "Structs/Mesh.hpp"
+#include "Structs/VertexDataSource.hpp"
+#include <vector>
+#include <memory>
+#include <unordered_map>
+#include <fstream>
+
+namespace quickhull {
+
+    template<typename T>
+    class ConvexHull {
+        std::unique_ptr<std::vector<Vector3<T>>> m_optimizedVertexBuffer;
+        VertexDataSource<T> m_vertices;
+        std::vector<size_t> m_indices;
+    public:
+        ConvexHull() {}
+
+        // Copy constructor
+        ConvexHull(const ConvexHull& o) {
+            m_indices = o.m_indices;
+            if (o.m_optimizedVertexBuffer) {
+                m_optimizedVertexBuffer.reset(new std::vector<Vector3<T>>(*o.m_optimizedVertexBuffer));
+                m_vertices = VertexDataSource<T>(*m_optimizedVertexBuffer);
+            }
+            else {
+                m_vertices = o.m_vertices;
+            }
+        }
+
+        ConvexHull& operator=(const ConvexHull& o) {
+            if (&o == this) {
+                return *this;
+            }
+            m_indices = o.m_indices;
+            if (o.m_optimizedVertexBuffer) {
+                m_optimizedVertexBuffer.reset(new std::vector<Vector3<T>>(*o.m_optimizedVertexBuffer));
+                m_vertices = VertexDataSource<T>(*m_optimizedVertexBuffer);
+            }
+            else {
+                m_vertices = o.m_vertices;
+            }
+            return *this;
+        }
+
+        ConvexHull(ConvexHull&& o) {
+            m_indices = std::move(o.m_indices);
+            if (o.m_optimizedVertexBuffer) {
+                m_optimizedVertexBuffer = std::move(o.m_optimizedVertexBuffer);
+                o.m_vertices = VertexDataSource<T>();
+                m_vertices = VertexDataSource<T>(*m_optimizedVertexBuffer);
+            }
+            else {
+                m_vertices = o.m_vertices;
+            }
+        }
+
+        ConvexHull& operator=(ConvexHull&& o) {
+            if (&o == this) {
+                return *this;
+            }
+            m_indices = std::move(o.m_indices);
+            if (o.m_optimizedVertexBuffer) {
+                m_optimizedVertexBuffer = std::move(o.m_optimizedVertexBuffer);
+                o.m_vertices = VertexDataSource<T>();
+                m_vertices = VertexDataSource<T>(*m_optimizedVertexBuffer);
+            }
+            else {
+                m_vertices = o.m_vertices;
+            }
+            return *this;
+        }
+
+        // Construct vertex and index buffers from half edge mesh and pointcloud
+        ConvexHull(const MeshBuilder<T>& mesh, const VertexDataSource<T>& pointCloud, bool CCW, bool useOriginalIndices) {
+            if (!useOriginalIndices) {
+                m_optimizedVertexBuffer.reset(new std::vector<Vector3<T>>());
+            }
+
+            std::vector<bool> faceProcessed(mesh.m_faces.size(),false);
+            std::vector<size_t> faceStack;
+            std::unordered_map<size_t,size_t> vertexIndexMapping; // Map vertex indices from original point cloud to the new mesh vertex indices
+            for (size_t i = 0;i<mesh.m_faces.size();i++) {
+                if (!mesh.m_faces[i].isDisabled()) {
+                    faceStack.push_back(i);
+                    break;
+                }
+            }
+            if (faceStack.size()==0) {
+                return;
+            }
+            const size_t iCCW = CCW ? 1 : 0;
+            const size_t finalMeshFaceCount = mesh.m_faces.size() - mesh.m_disabledFaces.size();
+            m_indices.reserve(finalMeshFaceCount*3);
+
+            while (faceStack.size()) {
+                auto it = faceStack.end()-1;
+                size_t top = *it;
+                assert(!mesh.m_faces[top].isDisabled());
+                faceStack.erase(it);
+                if (faceProcessed[top]) {
+                    continue;
+                }
+                else {
+                    faceProcessed[top]=true;
+                    auto halfEdges = mesh.getHalfEdgeIndicesOfFace(mesh.m_faces[top]);
+                    size_t adjacent[] = {mesh.m_halfEdges[mesh.m_halfEdges[halfEdges[0]].m_opp].m_face,mesh.m_halfEdges[mesh.m_halfEdges[halfEdges[1]].m_opp].m_face,mesh.m_halfEdges[mesh.m_halfEdges[halfEdges[2]].m_opp].m_face};
+                    for (auto a : adjacent) {
+                        if (!faceProcessed[a] && !mesh.m_faces[a].isDisabled()) {
+                            faceStack.push_back(a);
+                        }
+                    }
+                    auto vertices = mesh.getVertexIndicesOfFace(mesh.m_faces[top]);
+                    if (!useOriginalIndices) {
+                        for (auto& v : vertices) {
+                            auto itV = vertexIndexMapping.find(v);
+                            if (itV == vertexIndexMapping.end()) {
+                                m_optimizedVertexBuffer->push_back(pointCloud[v]);
+                                vertexIndexMapping[v] = m_optimizedVertexBuffer->size()-1;
+                                v = m_optimizedVertexBuffer->size()-1;
+                            }
+                            else {
+                                v = itV->second;
+                            }
+                        }
+                    }
+                    m_indices.push_back(vertices[0]);
+                    m_indices.push_back(vertices[1 + iCCW]);
+                    m_indices.push_back(vertices[2 - iCCW]);
+                }
+            }
+
+            if (!useOriginalIndices) {
+                m_vertices = VertexDataSource<T>(*m_optimizedVertexBuffer);
+            }
+            else {
+                m_vertices = pointCloud;
+            }
+        }
+
+        std::vector<size_t>& getIndexBuffer() {
+            return m_indices;
+        }
+
+        const std::vector<size_t>& getIndexBuffer() const {
+            return m_indices;
+        }
+
+        VertexDataSource<T>& getVertexBuffer() {
+            return m_vertices;
+        }
+
+        const VertexDataSource<T>& getVertexBuffer() const {
+            return m_vertices;
+        }
+
+        // Export the mesh to a Wavefront OBJ file
+        void writeWaveformOBJ(const std::string& filename, const std::string& objectName = "quickhull") const
+        {
+            std::ofstream objFile;
+            objFile.open (filename);
+            objFile << "o " << objectName << "\n";
+            for (const auto& v : getVertexBuffer()) {
+                objFile << "v " << v.x << " " << v.y << " " << v.z << "\n";
+            }
+            const auto& indBuf = getIndexBuffer();
+            size_t triangleCount = indBuf.size()/3;
+            for (size_t i=0;i<triangleCount;i++) {
+                objFile << "f " << indBuf[i*3]+1 << " " << indBuf[i*3+1]+1 << " " << indBuf[i*3+2]+1 << "\n";
+            }
+            objFile.close();
+        }
+
+    };
+
+}
+
+#endif /* CONVEXHULL_HPP_ */
diff --git a/thirdparty/manifold/src/third_party/quickhull/HalfEdgeMesh.hpp b/thirdparty/manifold/src/third_party/quickhull/HalfEdgeMesh.hpp
new file mode 100644
--- /dev/null
+++ b/thirdparty/manifold/src/third_party/quickhull/HalfEdgeMesh.hpp
@@ -0,0 +1,77 @@
+#ifndef HalfEdgeMesh_h
+#define HalfEdgeMesh_h
+
+#include <vector>
+#include <unordered_map>
+#include <cassert>
+#include "Structs/Vector3.hpp"
+#include "Structs/Mesh.hpp"
+#include "Structs/VertexDataSource.hpp"
+
+namespace quickhull {
+
+    template<typename FloatType, typename IndexType>
+    class HalfEdgeMesh {
+    public:
+
+        struct HalfEdge {
+            IndexType m_endVertex;
+            IndexType m_opp;
+            IndexType m_face;
+            IndexType m_next;
+        };
+
+        struct Face {
+            IndexType m_halfEdgeIndex; // Index of one of the half edges of this face
+        };
+
+        std::vector<Vector3<FloatType>> m_vertices;
+        std::vector<Face> m_faces;
+        std::vector<HalfEdge> m_halfEdges;
+
+        HalfEdgeMesh(const MeshBuilder<FloatType>& builderObject, const VertexDataSource<FloatType>& vertexData )
+        {
+            std::unordered_map<IndexType,IndexType> faceMapping;
+            std::unordered_map<IndexType,IndexType> halfEdgeMapping;
+            std::unordered_map<IndexType,IndexType> vertexMapping;
+
+            size_t i=0;
+            for (const auto& face : builderObject.m_faces) {
+                if (!face.isDisabled()) {
+                    m_faces.push_back({static_cast<IndexType>(face.m_he)});
+                    faceMapping[i] = m_faces.size()-1;
+
+                    const auto heIndices = builderObject.getHalfEdgeIndicesOfFace(face);
+                    for (const auto heIndex : heIndices) {
+                        const IndexType vertexIndex = builderObject.m_halfEdges[heIndex].m_endVertex;
+                        if (vertexMapping.count(vertexIndex)==0) {
+                            m_vertices.push_back(vertexData[vertexIndex]);
+                            vertexMapping[vertexIndex] = m_vertices.size()-1;
+                        }
+                    }
+                }
+                i++;
+            }
+
+            i=0;
+            for (const auto& halfEdge : builderObject.m_halfEdges) {
+                if (!halfEdge.isDisabled()) {
+                    m_halfEdges.push_back({static_cast<IndexType>(halfEdge.m_endVertex),static_cast<IndexType>(halfEdge.m_opp),static_cast<IndexType>(halfEdge.m_face),static_cast<IndexType>(halfEdge.m_next)});
+                    halfEdgeMapping[i] = m_halfEdges.size()-1;
+                }
+                i++;
+            }
+
+            for (auto& face : m_faces) {
+                assert(halfEdgeMapping.count(face.m_halfEdgeIndex) == 1);
+                face.m_halfEdgeIndex = halfEdgeMapping[face.m_halfEdgeIndex];
+            }
+
+            for (auto& he : m_halfEdges) {
+                he.m_face = faceMapping[he.m_face];
+                he.m_opp = halfEdgeMapping[he.m_opp];
+                he.m_next = halfEdgeMapping[he.m_next];
+                he.m_endVertex = vertexMapping[he.m_endVertex];
+            }
+        }
+
+    };
+}
+
+
+#endif /* HalfEdgeMesh_h */
diff --git a/thirdparty/manifold/src/third_party/quickhull/MathUtils.hpp b/thirdparty/manifold/src/third_party/quickhull/MathUtils.hpp
new file mode 100644
index 000000000000..b87b3b934097
--- /dev/null
+++ b/thirdparty/manifold/src/third_party/quickhull/MathUtils.hpp
@@ -0,0 +1,46 @@
+
+#ifndef QuickHull_MathUtils_hpp
+#define QuickHull_MathUtils_hpp
+
+#include "Structs/Vector3.hpp"
+#include "Structs/Ray.hpp"
+
+namespace quickhull {
+
+    namespace mathutils {
+
+        template <typename T>
+        inline T getSquaredDistanceBetweenPointAndRay(const Vector3<T>& p, const Ray<T>& r) {
+            const Vector3<T> s = p-r.m_S;
+            T t = s.dotProduct(r.m_V);
+            return s.getLengthSquared() - t*t*r.m_VInvLengthSquared;
+        }
+
+        // Note that the unit of distance returned is relative to plane's normal's length (divide by N.getNormalized() if needed to get the "real" distance).
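+        // For example (illustrative, not from the upstream sources): with
+        // N = (0,0,2) and a plane through P = (0,0,1), m_D = -2, so the point
+        // (0,0,3) yields 2*3 - 2 = 4, while its true distance is 4/|N| = 2.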
+        template <typename T>
+        inline T getSignedDistanceToPlane(const Vector3<T>& v, const Plane<T>& p) {
+            return p.m_N.dotProduct(v) + p.m_D;
+        }
+
+        template <typename T>
+        inline Vector3<T> getTriangleNormal(const Vector3<T>& a,const Vector3<T>& b,const Vector3<T>& c) {
+            // We want to get (a-c).crossProduct(b-c) without constructing temp vectors
+            T x = a.x - c.x;
+            T y = a.y - c.y;
+            T z = a.z - c.z;
+            T rhsx = b.x - c.x;
+            T rhsy = b.y - c.y;
+            T rhsz = b.z - c.z;
+            T px = y * rhsz - z * rhsy ;
+            T py = z * rhsx - x * rhsz ;
+            T pz = x * rhsy - y * rhsx ;
+            return Vector3<T>(px,py,pz);
+        }
+
+
+    }
+
+}
+
+
+#endif
diff --git a/thirdparty/manifold/src/third_party/quickhull/QuickHull.cpp b/thirdparty/manifold/src/third_party/quickhull/QuickHull.cpp
new file mode 100644
index 000000000000..fc02f3d00423
--- /dev/null
+++ b/thirdparty/manifold/src/third_party/quickhull/QuickHull.cpp
@@ -0,0 +1,503 @@
+#include "QuickHull.hpp"
+#include "MathUtils.hpp"
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <deque>
+#include <limits>
+#include "Structs/Mesh.hpp"
+
+namespace quickhull {
+
+    template<>
+    float defaultEps() {
+        return 0.0001f;
+    }
+
+    template<>
+    double defaultEps() {
+        return 0.0000001;
+    }
+
+    /*
+     * Implementation of the algorithm
+     */
+
+    template<typename T>
+    ConvexHull<T> QuickHull<T>::getConvexHull(const std::vector<Vector3<T>>& pointCloud, bool CCW, bool useOriginalIndices, T epsilon) {
+        VertexDataSource<T> vertexDataSource(pointCloud);
+        return getConvexHull(vertexDataSource,CCW,useOriginalIndices,epsilon);
+    }
+
+    template<typename T>
+    ConvexHull<T> QuickHull<T>::getConvexHull(const Vector3<T>* vertexData, size_t vertexCount, bool CCW, bool useOriginalIndices, T epsilon) {
+        VertexDataSource<T> vertexDataSource(vertexData,vertexCount);
+        return getConvexHull(vertexDataSource,CCW,useOriginalIndices,epsilon);
+    }
+
+    template<typename T>
+    ConvexHull<T> QuickHull<T>::getConvexHull(const T* vertexData, size_t vertexCount, bool CCW, bool useOriginalIndices, T epsilon) {
+        VertexDataSource<T> vertexDataSource((const vec3*)vertexData,vertexCount);
+        return getConvexHull(vertexDataSource,CCW,useOriginalIndices,epsilon);
+    }
+
+    template<typename FloatType>
+    HalfEdgeMesh<FloatType, size_t> QuickHull<FloatType>::getConvexHullAsMesh(const FloatType* vertexData, size_t vertexCount, bool CCW, FloatType epsilon) {
+        VertexDataSource<FloatType> vertexDataSource((const vec3*)vertexData,vertexCount);
+        buildMesh(vertexDataSource, CCW, false, epsilon);
+        return HalfEdgeMesh<FloatType, size_t>(m_mesh, m_vertexData);
+    }
+
+    template<typename T>
+    void QuickHull<T>::buildMesh(const VertexDataSource<T>& pointCloud, bool CCW, bool useOriginalIndices, T epsilon) {
+        // CCW is unused for now
+        (void)CCW;
+        // useOriginalIndices is unused for now
+        (void)useOriginalIndices;
+
+        if (pointCloud.size()==0) {
+            m_mesh = MeshBuilder<T>();
+            return;
+        }
+        m_vertexData = pointCloud;
+
+        // Very first: find extreme values and use them to compute the scale of the point cloud.
+        m_extremeValues = getExtremeValues();
+        m_scale = getScale(m_extremeValues);
+
+        // Epsilon we use depends on the scale
+        m_epsilon = epsilon*m_scale;
+        m_epsilonSquared = m_epsilon*m_epsilon;
+
+        // Reset diagnostics
+        m_diagnostics = DiagnosticsData();
+
+        m_planar = false; // The planar case happens when all the points appear to lie on a two dimensional subspace of R^3.
+ createConvexHalfEdgeMesh(); + if (m_planar) { + const size_t extraPointIndex = m_planarPointCloudTemp.size()-1; + for (auto& he : m_mesh.m_halfEdges) { + if (he.m_endVertex == extraPointIndex) { + he.m_endVertex = 0; + } + } + m_vertexData = pointCloud; + m_planarPointCloudTemp.clear(); + } + } + + template + ConvexHull QuickHull::getConvexHull(const VertexDataSource& pointCloud, bool CCW, bool useOriginalIndices, T epsilon) { + buildMesh(pointCloud,CCW,useOriginalIndices,epsilon); + return ConvexHull(m_mesh,m_vertexData, CCW, useOriginalIndices); + } + + template + void QuickHull::createConvexHalfEdgeMesh() { + m_visibleFaces.clear(); + m_horizonEdges.clear(); + m_possiblyVisibleFaces.clear(); + + // Compute base tetrahedron + setupInitialTetrahedron(); + assert(m_mesh.m_faces.size()==4); + + // Init face stack with those faces that have points assigned to them + m_faceList.clear(); + for (size_t i=0;i < 4;i++) { + auto& f = m_mesh.m_faces[i]; + if (f.m_pointsOnPositiveSide && f.m_pointsOnPositiveSide->size()>0) { + m_faceList.push_back(i); + f.m_inFaceStack = 1; + } + } + + // Process faces until the face list is empty. + size_t iter = 0; + while (!m_faceList.empty()) { + iter++; + if (iter == std::numeric_limits::max()) { + // Visible face traversal marks visited faces with iteration counter (to mark that the face has been visited on this iteration) and the max value represents unvisited faces. At this point we have to reset iteration counter. This shouldn't be an + // issue on 64 bit machines. + iter = 0; + } + + const size_t topFaceIndex = m_faceList.front(); + m_faceList.pop_front(); + + auto& tf = m_mesh.m_faces[topFaceIndex]; + tf.m_inFaceStack = 0; + + assert(!tf.m_pointsOnPositiveSide || tf.m_pointsOnPositiveSide->size()>0); + if (!tf.m_pointsOnPositiveSide || tf.isDisabled()) { + continue; + } + + // Pick the most distant point to this triangle plane as the point to which we extrude + const vec3& activePoint = m_vertexData[tf.m_mostDistantPoint]; + const size_t activePointIndex = tf.m_mostDistantPoint; + + // Find out the faces that have our active point on their positive side (these are the "visible faces"). The face on top of the stack of course is one of them. At the same time, we create a list of horizon edges. + m_horizonEdges.clear(); + m_possiblyVisibleFaces.clear(); + m_visibleFaces.clear(); + m_possiblyVisibleFaces.emplace_back(topFaceIndex,std::numeric_limits::max()); + while (m_possiblyVisibleFaces.size()) { + const auto faceData = m_possiblyVisibleFaces.back(); + m_possiblyVisibleFaces.pop_back(); + auto& pvf = m_mesh.m_faces[faceData.m_faceIndex]; + assert(!pvf.isDisabled()); + + if (pvf.m_visibilityCheckedOnIteration == iter) { + if (pvf.m_isVisibleFaceOnCurrentIteration) { + continue; + } + } + else { + const Plane& P = pvf.m_P; + pvf.m_visibilityCheckedOnIteration = iter; + const T d = P.m_N.dotProduct(activePoint)+P.m_D; + if (d>0) { + pvf.m_isVisibleFaceOnCurrentIteration = 1; + pvf.m_horizonEdgesOnCurrentIteration = 0; + m_visibleFaces.push_back(faceData.m_faceIndex); + for (auto heIndex : m_mesh.getHalfEdgeIndicesOfFace(pvf)) { + if (m_mesh.m_halfEdges[heIndex].m_opp != faceData.m_enteredFromHalfEdge) { + m_possiblyVisibleFaces.emplace_back( m_mesh.m_halfEdges[m_mesh.m_halfEdges[heIndex].m_opp].m_face,heIndex ); + } + } + continue; + } + assert(faceData.m_faceIndex != topFaceIndex); + } + + // The face is not visible. Therefore, the halfedge we came from is part of the horizon edge. 
+ pvf.m_isVisibleFaceOnCurrentIteration = 0; + m_horizonEdges.push_back(faceData.m_enteredFromHalfEdge); + // Store which half edge is the horizon edge. The other half edges of the face will not be part of the final mesh so their data slots can by recycled. + const auto halfEdges = m_mesh.getHalfEdgeIndicesOfFace(m_mesh.m_faces[m_mesh.m_halfEdges[faceData.m_enteredFromHalfEdge].m_face]); + const std::int8_t ind = (halfEdges[0]==faceData.m_enteredFromHalfEdge) ? 0 : (halfEdges[1]==faceData.m_enteredFromHalfEdge ? 1 : 2); + m_mesh.m_faces[m_mesh.m_halfEdges[faceData.m_enteredFromHalfEdge].m_face].m_horizonEdgesOnCurrentIteration |= (1<begin(),tf.m_pointsOnPositiveSide->end(),activePointIndex); + tf.m_pointsOnPositiveSide->erase(it); + if (tf.m_pointsOnPositiveSide->size()==0) { + reclaimToIndexVectorPool(tf.m_pointsOnPositiveSide); + } + continue; + } + + // Except for the horizon edges, all half edges of the visible faces can be marked as disabled. Their data slots will be reused. + // The faces will be disabled as well, but we need to remember the points that were on the positive side of them - therefore + // we save pointers to them. + m_newFaceIndices.clear(); + m_newHalfEdgeIndices.clear(); + m_disabledFacePointVectors.clear(); + size_t disableCounter = 0; + for (auto faceIndex : m_visibleFaces) { + auto& disabledFace = m_mesh.m_faces[faceIndex]; + auto halfEdges = m_mesh.getHalfEdgeIndicesOfFace(disabledFace); + for (size_t j=0;j<3;j++) { + if ((disabledFace.m_horizonEdgesOnCurrentIteration & (1<size()); // Because we should not assign point vectors to faces unless needed... + m_disabledFacePointVectors.push_back(std::move(t)); + } + } + if (disableCounter < horizonEdgeCount*2) { + const size_t newHalfEdgesNeeded = horizonEdgeCount*2-disableCounter; + for (size_t i=0;i planeNormal = mathutils::getTriangleNormal(m_vertexData[A],m_vertexData[B],activePoint); + newFace.m_P = Plane(planeNormal,activePoint); + newFace.m_he = AB; + + m_mesh.m_halfEdges[CA].m_opp = m_newHalfEdgeIndices[i>0 ? i*2-1 : 2*horizonEdgeCount-1]; + m_mesh.m_halfEdges[BC].m_opp = m_newHalfEdgeIndices[((i+1)*2) % (horizonEdgeCount*2)]; + } + + // Assign points that were on the positive side of the disabled faces to the new faces. 
+ for (auto& disabledPoints : m_disabledFacePointVectors) { + assert(disabledPoints); + for (const auto& point : *(disabledPoints)) { + if (point == activePointIndex) { + continue; + } + for (size_t j=0;jsize()>0); + if (!newFace.m_inFaceStack) { + m_faceList.push_back(newFaceIndex); + newFace.m_inFaceStack = 1; + } + } + } + } + + // Cleanup + m_indexVectorPool.clear(); + } + + /* + * Private helper functions + */ + + template + std::array QuickHull::getExtremeValues() { + std::array outIndices{0,0,0,0,0,0}; + T extremeVals[6] = {m_vertexData[0].x,m_vertexData[0].x,m_vertexData[0].y,m_vertexData[0].y,m_vertexData[0].z,m_vertexData[0].z}; + const size_t vCount = m_vertexData.size(); + for (size_t i=1;i& pos = m_vertexData[i]; + if (pos.x>extremeVals[0]) { + extremeVals[0]=pos.x; + outIndices[0]=i; + } + else if (pos.xextremeVals[2]) { + extremeVals[2]=pos.y; + outIndices[2]=i; + } + else if (pos.yextremeVals[4]) { + extremeVals[4]=pos.z; + outIndices[4]=i; + } + else if (pos.z + bool QuickHull::reorderHorizonEdges(std::vector& horizonEdges) { + const size_t horizonEdgeCount = horizonEdges.size(); + for (size_t i=0;i + T QuickHull::getScale(const std::array& extremeValues) { + T s = 0; + for (size_t i=0;i<6;i++) { + const T* v = (const T*)(&m_vertexData[extremeValues[i]]); + v += i/2; + auto a = std::abs(*v); + if (a>s) { + s = a; + } + } + return s; + } + + template + void QuickHull::setupInitialTetrahedron() { + const size_t vertexCount = m_vertexData.size(); + + // If we have at most 4 points, just return a degenerate tetrahedron: + if (vertexCount <= 4) { + size_t v[4] = {0,std::min((size_t)1,vertexCount-1),std::min((size_t)2,vertexCount-1),std::min((size_t)3,vertexCount-1)}; + const Vector3 N = mathutils::getTriangleNormal(m_vertexData[v[0]],m_vertexData[v[1]],m_vertexData[v[2]]); + const Plane trianglePlane(N,m_vertexData[v[0]]); + if (trianglePlane.isPointOnPositiveSide(m_vertexData[v[3]])) { + std::swap(v[0],v[1]); + } + return m_mesh.setup(v[0],v[1],v[2],v[3]); + } + + // Find two most distant extreme points. + T maxD = m_epsilonSquared; + std::pair selectedPoints; + for (size_t i=0;i<6;i++) { + for (size_t j=i+1;j<6;j++) { + const T d = m_vertexData[ m_extremeValues[i] ].getSquaredDistanceTo( m_vertexData[ m_extremeValues[j] ] ); + if (d > maxD) { + maxD=d; + selectedPoints={m_extremeValues[i],m_extremeValues[j]}; + } + } + } + if (maxD == m_epsilonSquared) { + // A degenerate case: the point cloud seems to consists of a single point + return m_mesh.setup(0,std::min((size_t)1,vertexCount-1),std::min((size_t)2,vertexCount-1),std::min((size_t)3,vertexCount-1)); + } + assert(selectedPoints.first != selectedPoints.second); + + // Find the most distant point to the line between the two chosen extreme points. 
+ const Ray r(m_vertexData[selectedPoints.first], (m_vertexData[selectedPoints.second] - m_vertexData[selectedPoints.first])); + maxD = m_epsilonSquared; + size_t maxI=std::numeric_limits::max(); + const size_t vCount = m_vertexData.size(); + for (size_t i=0;i maxD) { + maxD=distToRay; + maxI=i; + } + } + if (maxD == m_epsilonSquared) { + // It appears that the point cloud belongs to a 1 dimensional subspace of R^3: convex hull has no volume => return a thin triangle + // Pick any point other than selectedPoints.first and selectedPoints.second as the third point of the triangle + auto it = std::find_if(m_vertexData.begin(),m_vertexData.end(),[&](const vec3& ve) { + return ve != m_vertexData[selectedPoints.first] && ve != m_vertexData[selectedPoints.second]; + }); + const size_t thirdPoint = (it == m_vertexData.end()) ? selectedPoints.first : std::distance(m_vertexData.begin(),it); + it = std::find_if(m_vertexData.begin(),m_vertexData.end(),[&](const vec3& ve) { + return ve != m_vertexData[selectedPoints.first] && ve != m_vertexData[selectedPoints.second] && ve != m_vertexData[thirdPoint]; + }); + const size_t fourthPoint = (it == m_vertexData.end()) ? selectedPoints.first : std::distance(m_vertexData.begin(),it); + return m_mesh.setup(selectedPoints.first,selectedPoints.second,thirdPoint,fourthPoint); + } + + // These three points form the base triangle for our tetrahedron. + assert(selectedPoints.first != maxI && selectedPoints.second != maxI); + std::array baseTriangle{selectedPoints.first, selectedPoints.second, maxI}; + const Vector3 baseTriangleVertices[]={ m_vertexData[baseTriangle[0]], m_vertexData[baseTriangle[1]], m_vertexData[baseTriangle[2]] }; + + // Next step is to find the 4th vertex of the tetrahedron. We naturally choose the point farthest away from the triangle plane. + maxD=m_epsilon; + maxI=0; + const Vector3 N = mathutils::getTriangleNormal(baseTriangleVertices[0],baseTriangleVertices[1],baseTriangleVertices[2]); + Plane trianglePlane(N,baseTriangleVertices[0]); + for (size_t i=0;imaxD) { + maxD=d; + maxI=i; + } + } + if (maxD == m_epsilon) { + // All the points seem to lie on a 2D subspace of R^3. How to handle this? Well, let's add one extra point to the point cloud so that the convex hull will have volume. 
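+        // (Illustrative note, not upstream text: this extra point is removed
+        // again in buildMesh(), which remaps any half edge ending at it back
+        // to vertex 0 once the planar hull has been built.)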
+ m_planar = true; + const vec3 N1 = mathutils::getTriangleNormal(baseTriangleVertices[1],baseTriangleVertices[2],baseTriangleVertices[0]); + m_planarPointCloudTemp.clear(); + m_planarPointCloudTemp.insert(m_planarPointCloudTemp.begin(),m_vertexData.begin(),m_vertexData.end()); + const vec3 extraPoint = N1 + m_vertexData[0]; + m_planarPointCloudTemp.push_back(extraPoint); + maxI = m_planarPointCloudTemp.size()-1; + m_vertexData = VertexDataSource(m_planarPointCloudTemp); + } + + // Enforce CCW orientation (if user prefers clockwise orientation, swap two vertices in each triangle when final mesh is created) + const Plane triPlane(N,baseTriangleVertices[0]); + if (triPlane.isPointOnPositiveSide(m_vertexData[maxI])) { + std::swap(baseTriangle[0],baseTriangle[1]); + } + + // Create a tetrahedron half edge mesh and compute planes defined by each triangle + m_mesh.setup(baseTriangle[0],baseTriangle[1],baseTriangle[2],maxI); + for (auto& f : m_mesh.m_faces) { + auto v = m_mesh.getVertexIndicesOfFace(f); + const Vector3& va = m_vertexData[v[0]]; + const Vector3& vb = m_vertexData[v[1]]; + const Vector3& vc = m_vertexData[v[2]]; + const Vector3 N1 = mathutils::getTriangleNormal(va, vb, vc); + const Plane plane(N1,va); + f.m_P = plane; + } + + // Finally we assign a face for each vertex outside the tetrahedron (vertices inside the tetrahedron have no role anymore) + for (size_t i=0;i; + template class QuickHull; +} + diff --git a/thirdparty/manifold/src/third_party/quickhull/QuickHull.hpp b/thirdparty/manifold/src/third_party/quickhull/QuickHull.hpp new file mode 100644 index 000000000000..436c4d9f165c --- /dev/null +++ b/thirdparty/manifold/src/third_party/quickhull/QuickHull.hpp @@ -0,0 +1,223 @@ +#ifndef QUICKHULL_HPP_ +#define QUICKHULL_HPP_ +#include +#include +#include +#include +#include "Structs/Vector3.hpp" +#include "Structs/Plane.hpp" +#include "Structs/Pool.hpp" +#include "Structs/Mesh.hpp" +#include "ConvexHull.hpp" +#include "HalfEdgeMesh.hpp" +#include "MathUtils.hpp" + +/* + * Implementation of the 3D QuickHull algorithm by Antti Kuukka + * + * No copyrights. What follows is 100% Public Domain. + * + * + * + * INPUT: a list of points in 3D space (for example, vertices of a 3D mesh) + * + * OUTPUT: a ConvexHull object which provides vertex and index buffers of the generated convex hull as a triangle mesh. + * + * + * + * The implementation is thread-safe if each thread is using its own QuickHull object. + * + * + * SUMMARY OF THE ALGORITHM: + * - Create initial simplex (tetrahedron) using extreme points. We have four faces now and they form a convex mesh M. + * - For each point, assign them to the first face for which they are on the positive side of (so each point is assigned to at most + * one face). Points inside the initial tetrahedron are left behind now and no longer affect the calculations. + * - Add all faces that have points assigned to them to Face Stack. + * - Iterate until Face Stack is empty: + * - Pop topmost face F from the stack + * - From the points assigned to F, pick the point P that is farthest away from the plane defined by F. + * - Find all faces of M that have P on their positive side. Let us call these the "visible faces". + * - Because of the way M is constructed, these faces are connected. Solve their horizon edge loop. + * - "Extrude to P": Create new faces by connecting P with the points belonging to the horizon edge. Add the new faces to M and remove the visible + * faces from M. 
+ * - Each point that was assigned to visible faces is now assigned to at most
+ *   one of the newly created faces.
+ * - Those new faces that have points assigned to them are added to the top of Face Stack.
+ * - M is now the convex hull.
+ *
+ * TO DO:
+ *  - Implement a proper 2D QuickHull and use that to solve the degenerate 2D case (when all the points lie on the same plane in 3D space).
+ * */
+
+namespace quickhull {
+
+    struct DiagnosticsData {
+        size_t m_failedHorizonEdges; // How many times QuickHull failed to solve the horizon edge. Failures lead to degenerated convex hulls.
+
+        DiagnosticsData() : m_failedHorizonEdges(0) { }
+    };
+
+    template<typename FloatType>
+    FloatType defaultEps();
+
+    template<typename FloatType>
+    class QuickHull {
+        using vec3 = Vector3<FloatType>;
+
+        FloatType m_epsilon, m_epsilonSquared, m_scale;
+        bool m_planar;
+        std::vector<vec3> m_planarPointCloudTemp;
+        VertexDataSource<FloatType> m_vertexData;
+        MeshBuilder<FloatType> m_mesh;
+        std::array<size_t,6> m_extremeValues;
+        DiagnosticsData m_diagnostics;
+
+        // Temporary variables used during iteration process
+        std::vector<size_t> m_newFaceIndices;
+        std::vector<size_t> m_newHalfEdgeIndices;
+        std::vector< std::unique_ptr<std::vector<size_t>> > m_disabledFacePointVectors;
+        std::vector<size_t> m_visibleFaces;
+        std::vector<size_t> m_horizonEdges;
+        struct FaceData {
+            size_t m_faceIndex;
+            size_t m_enteredFromHalfEdge; // If the face turns out not to be visible, this half edge will be marked as horizon edge
+            FaceData(size_t fi, size_t he) : m_faceIndex(fi),m_enteredFromHalfEdge(he) {}
+        };
+        std::vector<FaceData> m_possiblyVisibleFaces;
+        std::deque<size_t> m_faceList;
+
+        // Create a half edge mesh representing the base tetrahedron from which the QuickHull iteration proceeds. m_extremeValues must be properly set up when this is called.
+        void setupInitialTetrahedron();
+
+        // Given a list of half edges, try to rearrange them so that they form a loop. Return true on success.
+        bool reorderHorizonEdges(std::vector<size_t>& horizonEdges);
+
+        // Find indices of extreme values (max x, min x, max y, min y, max z, min z) for the given point cloud
+        std::array<size_t,6> getExtremeValues();
+
+        // Compute scale of the vertex data.
+        FloatType getScale(const std::array<size_t,6>& extremeValues);
+
+        // Each face contains a unique pointer to a vector of indices. However, many - often most - faces do not have any points on the positive
+        // side of them especially at the end of the iteration. When a face is removed from the mesh, its associated point vector, if such
+        // exists, is moved to the index vector pool, and when we need to add new faces with points on the positive side to the mesh,
+        // we reuse these vectors. This reduces the amount of std::vectors we have to deal with, and impact on performance is remarkable.
+        Pool<std::vector<size_t>> m_indexVectorPool;
+        inline std::unique_ptr<std::vector<size_t>> getIndexVectorFromPool();
+        inline void reclaimToIndexVectorPool(std::unique_ptr<std::vector<size_t>>& ptr);
+
+        // Associates a point with a face if the point resides on the positive side of the plane. Returns true if the point was on the positive side.
+        inline bool addPointToFace(typename MeshBuilder<FloatType>::Face& f, size_t pointIndex);
+
+        // This will update m_mesh from which we create the ConvexHull object that getConvexHull function returns
+        void createConvexHalfEdgeMesh();
+
+        // Constructs the convex hull into a MeshBuilder object which can be converted to a ConvexHull or Mesh object
+        void buildMesh(const VertexDataSource<FloatType>& pointCloud, bool CCW, bool useOriginalIndices, FloatType eps);
+
+        // The public getConvexHull functions will setup a VertexDataSource object and call this
+        ConvexHull<FloatType> getConvexHull(const VertexDataSource<FloatType>& pointCloud, bool CCW, bool useOriginalIndices, FloatType eps);
+    public:
+        // Computes convex hull for a given point cloud.
+        // Params:
+        //   pointCloud: a vector of 3D points
+        //   CCW: whether the output mesh triangles should have CCW orientation
+        //   useOriginalIndices: should the output mesh use same vertex indices as the original point cloud. If this is false,
+        //      then we generate a new vertex buffer which contains only the vertices that are part of the convex hull.
+        //   eps: minimum distance to a plane to consider a point being on positive side of it (for a point cloud with scale 1)
+        ConvexHull<FloatType> getConvexHull(const std::vector<Vector3<FloatType>>& pointCloud,
+                                            bool CCW,
+                                            bool useOriginalIndices,
+                                            FloatType eps = defaultEps<FloatType>());
+
+        // Computes convex hull for a given point cloud.
+        // Params:
+        //   vertexData: pointer to the first 3D point of the point cloud
+        //   vertexCount: number of vertices in the point cloud
+        //   CCW: whether the output mesh triangles should have CCW orientation
+        //   useOriginalIndices: should the output mesh use same vertex indices as the original point cloud. If this is false,
+        //      then we generate a new vertex buffer which contains only the vertices that are part of the convex hull.
+        //   eps: minimum distance to a plane to consider a point being on positive side of it (for a point cloud with scale 1)
+        ConvexHull<FloatType> getConvexHull(const Vector3<FloatType>* vertexData,
+                                            size_t vertexCount,
+                                            bool CCW,
+                                            bool useOriginalIndices,
+                                            FloatType eps = defaultEps<FloatType>());
+
+        // Computes convex hull for a given point cloud. This function assumes that the vertex data resides in memory
+        // in the following format: x_0,y_0,z_0,x_1,y_1,z_1,...
+        // Params:
+        //   vertexData: pointer to the X component of the first point of the point cloud.
+        //   vertexCount: number of vertices in the point cloud
+        //   CCW: whether the output mesh triangles should have CCW orientation
+        //   useOriginalIndices: should the output mesh use same vertex indices as the original point cloud. If this is false,
+        //      then we generate a new vertex buffer which contains only the vertices that are part of the convex hull.
+        //   eps: minimum distance to a plane to consider a point being on positive side of it (for a point cloud with scale 1)
+        ConvexHull<FloatType> getConvexHull(const FloatType* vertexData,
+                                            size_t vertexCount,
+                                            bool CCW,
+                                            bool useOriginalIndices,
+                                            FloatType eps = defaultEps<FloatType>());
+
+        // Computes convex hull for a given point cloud. This function assumes that the vertex data resides in memory
+        // in the following format: x_0,y_0,z_0,x_1,y_1,z_1,...
+        // Params:
+        //   vertexData: pointer to the X component of the first point of the point cloud.
+        //   vertexCount: number of vertices in the point cloud
+        //   CCW: whether the output mesh triangles should have CCW orientation
+        //   eps: minimum distance to a plane to consider a point being on positive side of it (for a point cloud with scale 1)
+        // Returns:
+        //   Convex hull of the point cloud as a mesh object with half edge structure.
+        HalfEdgeMesh<FloatType, size_t> getConvexHullAsMesh(const FloatType* vertexData,
+                                                            size_t vertexCount,
+                                                            bool CCW,
+                                                            FloatType eps = defaultEps<FloatType>());
+
+        // Get diagnostics about last generated convex hull
+        const DiagnosticsData& getDiagnostics() {
+            return m_diagnostics;
+        }
+    };
+
+    /*
+     * Inline function definitions
+     */
+
+    template<typename T>
+    std::unique_ptr<std::vector<size_t>> QuickHull<T>::getIndexVectorFromPool() {
+        auto r = m_indexVectorPool.get();
+        r->clear();
+        return r;
+    }
+
+    template<typename T>
+    void QuickHull<T>::reclaimToIndexVectorPool(std::unique_ptr<std::vector<size_t>>& ptr) {
+        const size_t oldSize = ptr->size();
+        if ((oldSize+1)*128 < ptr->capacity()) {
+            // Reduce memory usage! Huge vectors are needed at the beginning of iteration when faces have many points on their positive side. Later on, smaller vectors will suffice.
+            ptr.reset(nullptr);
+            return;
+        }
+        m_indexVectorPool.reclaim(ptr);
+    }
+
+    template<typename T>
+    bool QuickHull<T>::addPointToFace(typename MeshBuilder<T>::Face& f, size_t pointIndex) {
+        const T D = mathutils::getSignedDistanceToPlane(m_vertexData[ pointIndex ],f.m_P);
+        if (D>0 && D*D > m_epsilonSquared*f.m_P.m_sqrNLength) {
+            if (!f.m_pointsOnPositiveSide) {
+                f.m_pointsOnPositiveSide = std::move(getIndexVectorFromPool());
+            }
+            f.m_pointsOnPositiveSide->push_back( pointIndex );
+            if (D > f.m_mostDistantPointDist) {
+                f.m_mostDistantPointDist = D;
+                f.m_mostDistantPoint = pointIndex;
+            }
+            return true;
+        }
+        return false;
+    }
+
+}
+
+
+#endif /* QUICKHULL_HPP_ */
diff --git a/thirdparty/manifold/src/third_party/quickhull/README.md b/thirdparty/manifold/src/third_party/quickhull/README.md
new file mode 100644
index 000000000000..a36c081b7855
--- /dev/null
+++ b/thirdparty/manifold/src/third_party/quickhull/README.md
@@ -0,0 +1,31 @@
+This implementation is 100% Public Domain.
+
+Feel free to use.
+
+C++11 is needed to compile it.
+
+Basic usage:
+
+    #include "quickhull/quickhull.hpp"
+
+    using namespace quickhull;
+    QuickHull<float> qh; // Could be double as well
+    std::vector<Vector3<float>> pointCloud;
+    // Add points to point cloud
+    ...
+    auto hull = qh.getConvexHull(pointCloud, true, false);
+    const auto& indexBuffer = hull.getIndexBuffer();
+    const auto& vertexBuffer = hull.getVertexBuffer();
+    // Do what you want with the convex triangle mesh
+
+Vertex data can be passed as a pointer to float/double as long as the data is in X_0,Y_0,Z_0,X_1,Y_1,Z_1,...,X_N,Y_N,Z_N format:
+
+    auto hull = qh.getConvexHull(&pointCloud[0].x, pointCloud.size(), true, false);
+
+The first boolean parameter of getConvexHull specifies whether the resulting mesh should have its triangles in CCW orientation.
+
+The second boolean parameter specifies whether the mesh should use vertex indices of the original point cloud. If it is false, a new vertex buffer is generated which consists only of those vertices that are part of the convex hull. In this case, the new vertex buffer is owned by the returned ConvexHull object. Otherwise, the original point cloud is used as vertex buffer and since the vertices are not copied, make sure you don't call ConvexHull::getVertexBuffer after releasing the memory that contains the original point cloud data.
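+For reference, a minimal end-to-end sketch (illustrative only, not part of the upstream README; it assumes the include path used above):
+
+    #include "quickhull/quickhull.hpp"
+    #include <iostream>
+    #include <vector>
+
+    int main() {
+        using namespace quickhull;
+        QuickHull<float> qh;
+        std::vector<Vector3<float>> pointCloud{
+            {0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {0, 0, 1}, {0.25f, 0.25f, 0.25f}};
+        auto hull = qh.getConvexHull(pointCloud, true, false);
+        // The interior point is dropped; a tetrahedron's hull has 4 faces.
+        std::cout << hull.getIndexBuffer().size() / 3 << " triangles\n";
+        return 0;
+    }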
+ +This implementation is fast, because the convex hull is internally built using a half edge mesh representation which provides quick access to adjacent faces. It is also possible to get the output convex hull as a half edge mesh: + + auto mesh = qh.getConvexHullAsMesh(&pointCloud[0].x, pointCloud.size(), true); diff --git a/thirdparty/manifold/src/third_party/quickhull/Structs/Mesh.hpp b/thirdparty/manifold/src/third_party/quickhull/Structs/Mesh.hpp new file mode 100644 index 000000000000..f58c5afcdfb7 --- /dev/null +++ b/thirdparty/manifold/src/third_party/quickhull/Structs/Mesh.hpp @@ -0,0 +1,255 @@ +#ifndef MESH_HPP_ +#define MESH_HPP_ + +#include +#include "Vector3.hpp" +#include "Plane.hpp" +#include "Pool.hpp" +#include +#include +#include +#include +#include "VertexDataSource.hpp" +#include +#include + +namespace quickhull { + + template + class MeshBuilder { + public: + struct HalfEdge { + size_t m_endVertex; + size_t m_opp; + size_t m_face; + size_t m_next; + + void disable() { + m_endVertex = std::numeric_limits::max(); + } + + bool isDisabled() const { + return m_endVertex == std::numeric_limits::max(); + } + }; + + struct Face { + size_t m_he; + Plane m_P{}; + T m_mostDistantPointDist; + size_t m_mostDistantPoint; + size_t m_visibilityCheckedOnIteration; + std::uint8_t m_isVisibleFaceOnCurrentIteration : 1; + std::uint8_t m_inFaceStack : 1; + std::uint8_t m_horizonEdgesOnCurrentIteration : 3; // Bit for each half edge assigned to this face, each being 0 or 1 depending on whether the edge belongs to horizon edge + std::unique_ptr> m_pointsOnPositiveSide; + + Face() : m_he(std::numeric_limits::max()), + m_mostDistantPointDist(0), + m_mostDistantPoint(0), + m_visibilityCheckedOnIteration(0), + m_isVisibleFaceOnCurrentIteration(0), + m_inFaceStack(0), + m_horizonEdgesOnCurrentIteration(0) + { + + } + + void disable() { + m_he = std::numeric_limits::max(); + } + + bool isDisabled() const { + return m_he == std::numeric_limits::max(); + } + }; + + // Mesh data + std::vector m_faces; + std::vector m_halfEdges; + + // When the mesh is modified and faces and half edges are removed from it, we do not actually remove them from the container vectors. + // Insted, they are marked as disabled which means that the indices can be reused when we need to add new faces and half edges to the mesh. + // We store the free indices in the following vectors. + std::vector m_disabledFaces,m_disabledHalfEdges; + + size_t addFace() { + if (m_disabledFaces.size()) { + size_t index = m_disabledFaces.back(); + auto& f = m_faces[index]; + assert(f.isDisabled()); + assert(!f.m_pointsOnPositiveSide); + f.m_mostDistantPointDist = 0; + m_disabledFaces.pop_back(); + return index; + } + m_faces.emplace_back(); + return m_faces.size()-1; + } + + size_t addHalfEdge() { + if (m_disabledHalfEdges.size()) { + const size_t index = m_disabledHalfEdges.back(); + m_disabledHalfEdges.pop_back(); + return index; + } + m_halfEdges.emplace_back(); + return m_halfEdges.size()-1; + } + + // Mark a face as disabled and return a pointer to the points that were on the positive of it. + std::unique_ptr> disableFace(size_t faceIndex) { + auto& f = m_faces[faceIndex]; + f.disable(); + m_disabledFaces.push_back(faceIndex); + return std::move(f.m_pointsOnPositiveSide); + } + + void disableHalfEdge(size_t heIndex) { + auto& he = m_halfEdges[heIndex]; + he.disable(); + m_disabledHalfEdges.push_back(heIndex); + } + + MeshBuilder() = default; + + // Create a mesh with initial tetrahedron ABCD. 
Dot product of AB with the normal of triangle ABC should be negative. + void setup(size_t a, size_t b, size_t c, size_t d) { + m_faces.clear(); + m_halfEdges.clear(); + m_disabledFaces.clear(); + m_disabledHalfEdges.clear(); + + m_faces.reserve(4); + m_halfEdges.reserve(12); + + // Create halfedges + HalfEdge AB; + AB.m_endVertex = b; + AB.m_opp = 6; + AB.m_face = 0; + AB.m_next = 1; + m_halfEdges.push_back(AB); + + HalfEdge BC; + BC.m_endVertex = c; + BC.m_opp = 9; + BC.m_face = 0; + BC.m_next = 2; + m_halfEdges.push_back(BC); + + HalfEdge CA; + CA.m_endVertex = a; + CA.m_opp = 3; + CA.m_face = 0; + CA.m_next = 0; + m_halfEdges.push_back(CA); + + HalfEdge AC; + AC.m_endVertex = c; + AC.m_opp = 2; + AC.m_face = 1; + AC.m_next = 4; + m_halfEdges.push_back(AC); + + HalfEdge CD; + CD.m_endVertex = d; + CD.m_opp = 11; + CD.m_face = 1; + CD.m_next = 5; + m_halfEdges.push_back(CD); + + HalfEdge DA; + DA.m_endVertex = a; + DA.m_opp = 7; + DA.m_face = 1; + DA.m_next = 3; + m_halfEdges.push_back(DA); + + HalfEdge BA; + BA.m_endVertex = a; + BA.m_opp = 0; + BA.m_face = 2; + BA.m_next = 7; + m_halfEdges.push_back(BA); + + HalfEdge AD; + AD.m_endVertex = d; + AD.m_opp = 5; + AD.m_face = 2; + AD.m_next = 8; + m_halfEdges.push_back(AD); + + HalfEdge DB; + DB.m_endVertex = b; + DB.m_opp = 10; + DB.m_face = 2; + DB.m_next = 6; + m_halfEdges.push_back(DB); + + HalfEdge CB; + CB.m_endVertex = b; + CB.m_opp = 1; + CB.m_face = 3; + CB.m_next = 10; + m_halfEdges.push_back(CB); + + HalfEdge BD; + BD.m_endVertex = d; + BD.m_opp = 8; + BD.m_face = 3; + BD.m_next = 11; + m_halfEdges.push_back(BD); + + HalfEdge DC; + DC.m_endVertex = c; + DC.m_opp = 4; + DC.m_face = 3; + DC.m_next = 9; + m_halfEdges.push_back(DC); + + // Create faces + Face ABC; + ABC.m_he = 0; + m_faces.push_back(std::move(ABC)); + + Face ACD; + ACD.m_he = 3; + m_faces.push_back(std::move(ACD)); + + Face BAD; + BAD.m_he = 6; + m_faces.push_back(std::move(BAD)); + + Face CBD; + CBD.m_he = 9; + m_faces.push_back(std::move(CBD)); + } + + std::array getVertexIndicesOfFace(const Face& f) const { + std::array v; + const HalfEdge* he = &m_halfEdges[f.m_he]; + v[0] = he->m_endVertex; + he = &m_halfEdges[he->m_next]; + v[1] = he->m_endVertex; + he = &m_halfEdges[he->m_next]; + v[2] = he->m_endVertex; + return v; + } + + std::array getVertexIndicesOfHalfEdge(const HalfEdge& he) const { + return {m_halfEdges[he.m_opp].m_endVertex,he.m_endVertex}; + } + + std::array getHalfEdgeIndicesOfFace(const Face& f) const { + return {f.m_he,m_halfEdges[f.m_he].m_next,m_halfEdges[m_halfEdges[f.m_he].m_next].m_next}; + } + }; + + + +} + + + +#endif diff --git a/thirdparty/manifold/src/third_party/quickhull/Structs/Plane.hpp b/thirdparty/manifold/src/third_party/quickhull/Structs/Plane.hpp new file mode 100644 index 000000000000..903a7efe3630 --- /dev/null +++ b/thirdparty/manifold/src/third_party/quickhull/Structs/Plane.hpp @@ -0,0 +1,36 @@ +#ifndef QHPLANE_HPP_ +#define QHPLANE_HPP_ + +#include "Vector3.hpp" + +namespace quickhull { + + template + class Plane { + public: + Vector3 m_N; + + // Signed distance (if normal is of length 1) to the plane from origin + T m_D; + + // Normal length squared + T m_sqrNLength; + + bool isPointOnPositiveSide(const Vector3& Q) const { + T d = m_N.dotProduct(Q)+m_D; + if (d>=0) return true; + return false; + } + + Plane() = default; + + // Construct a plane using normal N and any point P on the plane + Plane(const Vector3& N, const Vector3& P) : m_N(N), m_D(-N.dotProduct(P)), m_sqrNLength(m_N.x*m_N.x+m_N.y*m_N.y+m_N.z*m_N.z) { + + } + 
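+        // (Illustrative note, not upstream text: m_N need not be unit length;
+        // callers compensate, e.g. QuickHull::addPointToFace compares D*D
+        // against m_epsilonSquared * m_sqrNLength.)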
}; + +} + + +#endif /* PLANE_HPP_ */ diff --git a/thirdparty/manifold/src/third_party/quickhull/Structs/Pool.hpp b/thirdparty/manifold/src/third_party/quickhull/Structs/Pool.hpp new file mode 100644 index 000000000000..4216b4a15339 --- /dev/null +++ b/thirdparty/manifold/src/third_party/quickhull/Structs/Pool.hpp @@ -0,0 +1,35 @@ +#ifndef Pool_h +#define Pool_h + +#include +#include + +namespace quickhull { + + template + class Pool { + std::vector> m_data; + public: + void clear() { + m_data.clear(); + } + + void reclaim(std::unique_ptr& ptr) { + m_data.push_back(std::move(ptr)); + } + + std::unique_ptr get() { + if (m_data.size()==0) { + return std::unique_ptr(new T()); + } + auto it = m_data.end()-1; + std::unique_ptr r = std::move(*it); + m_data.erase(it); + return r; + } + + }; + +} + +#endif /* Pool_h */ diff --git a/thirdparty/manifold/src/third_party/quickhull/Structs/Ray.hpp b/thirdparty/manifold/src/third_party/quickhull/Structs/Ray.hpp new file mode 100644 index 000000000000..19b60728e5b1 --- /dev/null +++ b/thirdparty/manifold/src/third_party/quickhull/Structs/Ray.hpp @@ -0,0 +1,21 @@ +#ifndef QuickHull_Ray_hpp +#define QuickHull_Ray_hpp + +#include "Vector3.hpp" + +namespace quickhull { + + template + struct Ray { + const Vector3 m_S; + const Vector3 m_V; + const T m_VInvLengthSquared; + + Ray(const Vector3& S,const Vector3& V) : m_S(S), m_V(V), m_VInvLengthSquared(1/m_V.getLengthSquared()) { + } + }; + +} + + +#endif diff --git a/thirdparty/manifold/src/third_party/quickhull/Structs/Vector3.hpp b/thirdparty/manifold/src/third_party/quickhull/Structs/Vector3.hpp new file mode 100644 index 000000000000..47a5f74617dc --- /dev/null +++ b/thirdparty/manifold/src/third_party/quickhull/Structs/Vector3.hpp @@ -0,0 +1,140 @@ +#ifndef QuickHull_Vector3_hpp +#define QuickHull_Vector3_hpp + +#include +#include + +namespace quickhull { + + template + class Vector3 + { + public: + Vector3() = default; + + Vector3(T x, T y, T z) : x(x), y(y), z(z) { + + } + + T x,y,z; + + T dotProduct(const Vector3& other) const { + return x*other.x+y*other.y+z*other.z; + } + + void normalize() { + const T len = getLength(); + x/=len; + y/=len; + z/=len; + } + + Vector3 getNormalized() const { + const T len = getLength(); + return Vector3(x/len,y/len,z/len); + } + + T getLength() const { + return std::sqrt(x*x+y*y+z*z); + } + + Vector3 operator-(const Vector3& other) const { + return Vector3(x-other.x,y-other.y,z-other.z); + } + + Vector3 operator+(const Vector3& other) const { + return Vector3(x+other.x,y+other.y,z+other.z); + } + + Vector3& operator+=(const Vector3& other) { + x+=other.x; + y+=other.y; + z+=other.z; + return *this; + } + Vector3& operator-=(const Vector3& other) { + x-=other.x; + y-=other.y; + z-=other.z; + return *this; + } + Vector3& operator*=(T c) { + x*=c; + y*=c; + z*=c; + return *this; + } + + Vector3& operator/=(T c) { + x/=c; + y/=c; + z/=c; + return *this; + } + + Vector3 operator-() const { + return Vector3(-x,-y,-z); + } + + template + Vector3 operator*(S c) const { + return Vector3(x*c,y*c,z*c); + } + + template + Vector3 operator/(S c) const { + return Vector3(x/c,y/c,z/c); + } + + T getLengthSquared() const { + return x*x + y*y + z*z; + } + + bool operator!=(const Vector3& o) const { + return x != o.x || y != o.y || z != o.z; + } + + // Projection onto another vector + Vector3 projection(const Vector3& o) const { + T C = dotProduct(o)/o.getLengthSquared(); + return o*C; + } + + Vector3 crossProduct (const Vector3& rhs ) { + T a = y * rhs.z - z * rhs.y ; + T b = z * rhs.x - 
x * rhs.z ; + T c = x * rhs.y - y * rhs.x ; + Vector3 product( a , b , c ) ; + return product ; + } + + T getDistanceTo(const Vector3& other) const { + Vector3 diff = *this - other; + return diff.getLength(); + } + + T getSquaredDistanceTo(const Vector3& other) const { + const T dx = x-other.x; + const T dy = y-other.y; + const T dz = z-other.z; + return dx*dx+dy*dy+dz*dz; + } + + }; + + // Overload also << operator for easy printing of debug data + template + inline std::ostream& operator<<(std::ostream& os, const Vector3& vec) { + os << "(" << vec.x << "," << vec.y << "," << vec.z << ")"; + return os; + } + + template + inline Vector3 operator*(T c, const Vector3& v) { + return Vector3(v.x*c,v.y*c,v.z*c); + } + +} + + +#endif diff --git a/thirdparty/manifold/src/third_party/quickhull/Structs/VertexDataSource.hpp b/thirdparty/manifold/src/third_party/quickhull/Structs/VertexDataSource.hpp new file mode 100644 index 000000000000..162b643bd096 --- /dev/null +++ b/thirdparty/manifold/src/third_party/quickhull/Structs/VertexDataSource.hpp @@ -0,0 +1,48 @@ +#ifndef VertexDataSource_h +#define VertexDataSource_h + +#include "Vector3.hpp" + +namespace quickhull { + + template + class VertexDataSource { + const Vector3* m_ptr; + size_t m_count; + + public: + VertexDataSource(const Vector3* ptr, size_t count) : m_ptr(ptr), m_count(count) { + + } + + VertexDataSource(const std::vector>& vec) : m_ptr(&vec[0]), m_count(vec.size()) { + + } + + VertexDataSource() : m_ptr(nullptr), m_count(0) { + + } + + VertexDataSource& operator=(const VertexDataSource& other) = default; + + size_t size() const { + return m_count; + } + + const Vector3& operator[](size_t index) const { + return m_ptr[index]; + } + + const Vector3* begin() const { + return m_ptr; + } + + const Vector3* end() const { + return m_ptr + m_count; + } + }; + +} + + +#endif /* VertexDataSource_h */ diff --git a/thirdparty/manifold/src/utilities/include/hashtable.h b/thirdparty/manifold/src/utilities/include/hashtable.h new file mode 100644 index 000000000000..ed0f5538d2bc --- /dev/null +++ b/thirdparty/manifold/src/utilities/include/hashtable.h @@ -0,0 +1,174 @@ +// Copyright 2022 The Manifold Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#pragma once
+#include <stdint.h>
+
+#include <atomic>
+
+#include "public.h"
+#include "utils.h"
+#include "vec.h"
+
+namespace {
+typedef unsigned long long int Uint64;
+typedef Uint64 (*hash_fun_t)(Uint64);
+inline constexpr Uint64 kOpen = std::numeric_limits<Uint64>::max();
+
+template <typename T>
+T AtomicCAS(T& target, T compare, T val) {
+  std::atomic<T>& tar = reinterpret_cast<std::atomic<T>&>(target);
+  tar.compare_exchange_strong(compare, val, std::memory_order_acq_rel);
+  return compare;
+}
+
+template <typename T>
+void AtomicStore(T& target, T val) {
+  std::atomic<T>& tar = reinterpret_cast<std::atomic<T>&>(target);
+  // release is good enough, although not really something general
+  tar.store(val, std::memory_order_release);
+}
+
+template <typename T>
+T AtomicLoad(const T& target) {
+  const std::atomic<T>& tar = reinterpret_cast<const std::atomic<T>&>(target);
+  // acquire is good enough, although not general
+  return tar.load(std::memory_order_acquire);
+}
+
+// https://stackoverflow.com/questions/664014/what-integer-hash-function-are-good-that-accepts-an-integer-hash-key
+inline Uint64 hash64bit(Uint64 x) {
+  x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ull;
+  x = (x ^ (x >> 27)) * 0x94d049bb133111ebull;
+  x = x ^ (x >> 31);
+  return x;
+}
+}  // namespace
+
+namespace manifold {
+/** @addtogroup Private
+ *  @{
+ */
+
+template <typename V, hash_fun_t H = hash64bit>
+class HashTableD {
+ public:
+  HashTableD(Vec<Uint64>& keys, Vec<V>& values, std::atomic<size_t>& used,
+             uint32_t step = 1)
+      : step_{step}, keys_{keys}, values_{values}, used_{used} {}
+
+  int Size() const { return keys_.size(); }
+
+  bool Full() const {
+    return used_.load(std::memory_order_relaxed) * 2 > Size();
+  }
+
+  void Insert(Uint64 key, const V& val) {
+    uint32_t idx = H(key) & (Size() - 1);
+    while (1) {
+      if (Full()) return;
+      Uint64& k = keys_[idx];
+      const Uint64 found = AtomicCAS(k, kOpen, key);
+      if (found == kOpen) {
+        used_.fetch_add(1, std::memory_order_relaxed);
+        values_[idx] = val;
+        return;
+      }
+      if (found == key) return;
+      idx = (idx + step_) & (Size() - 1);
+    }
+  }
+
+  V& operator[](Uint64 key) {
+    uint32_t idx = H(key) & (Size() - 1);
+    while (1) {
+      const Uint64 k = AtomicLoad(keys_[idx]);
+      if (k == key || k == kOpen) {
+        return values_[idx];
+      }
+      idx = (idx + step_) & (Size() - 1);
+    }
+  }
+
+  const V& operator[](Uint64 key) const {
+    uint32_t idx = H(key) & (Size() - 1);
+    while (1) {
+      const Uint64 k = AtomicLoad(keys_[idx]);
+      if (k == key || k == kOpen) {
+        return values_[idx];
+      }
+      idx = (idx + step_) & (Size() - 1);
+    }
+  }
+
+  Uint64 KeyAt(int idx) const { return AtomicLoad(keys_[idx]); }
+  V& At(int idx) { return values_[idx]; }
+  const V& At(int idx) const { return values_[idx]; }
+
+ private:
+  uint32_t step_;
+  VecView<Uint64> keys_;
+  VecView<V> values_;
+  std::atomic<size_t>& used_;
+};
+
+template <typename V, hash_fun_t H = hash64bit>
+class HashTable {
+ public:
+  HashTable(size_t size, uint32_t step = 1)
+      : keys_{size == 0 ? 0 : 1_z << (int)ceil(log2(size)), kOpen},
+        values_{size == 0 ? 0 : 1_z << (int)ceil(log2(size)), {}},
+        step_(step) {}
+
+  HashTable(const HashTable& other)
+      : keys_(other.keys_),
+        values_(other.values_),
+        used_(other.used_.load()),
+        step_(other.step_) {}
+
+  HashTable& operator=(const HashTable& other) {
+    if (this == &other) return *this;
+    keys_ = other.keys_;
+    values_ = other.values_;
+    used_.store(other.used_.load());
+    step_ = other.step_;
+    return *this;
+  }
+
+  HashTableD<V, H> D() { return {keys_, values_, used_, step_}; }
+
+  int Entries() const { return used_.load(std::memory_order_relaxed); }
+
+  size_t Size() const { return keys_.size(); }
+
+  bool Full() const {
+    return used_.load(std::memory_order_relaxed) * 2 > Size();
+  }
+
+  float FilledFraction() const {
+    return static_cast<float>(used_.load(std::memory_order_relaxed)) / Size();
+  }
+
+  Vec<V>& GetValueStore() { return values_; }
+
+  static Uint64 Open() { return kOpen; }
+
+ private:
+  Vec<Uint64> keys_;
+  Vec<V> values_;
+  std::atomic<size_t> used_ = 0;
+  uint32_t step_;
+};
+
+/** @} */
+}  // namespace manifold
diff --git a/thirdparty/manifold/src/utilities/include/optional_assert.h b/thirdparty/manifold/src/utilities/include/optional_assert.h
new file mode 100644
index 000000000000..75f32684a12d
--- /dev/null
+++ b/thirdparty/manifold/src/utilities/include/optional_assert.h
@@ -0,0 +1,38 @@
+// Copyright 2022 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "public.h"
+
+#ifdef MANIFOLD_DEBUG
+#include <sstream>
+#include <stdexcept>
+#include <string>
+
+template <typename Ex>
+void Assert(bool condition, const char* file, int line, const std::string& cond,
+            const std::string& msg) {
+  if (!condition) {
+    std::ostringstream output;
+    output << "Error in file: " << file << " (" << line << "): \'" << cond
+           << "\' is false: " << msg;
+    throw Ex(output.str());
+  }
+}
+#define ASSERT(condition, EX, msg) \
+  Assert<EX>(condition, __FILE__, __LINE__, #condition, msg);
+#else
+#define ASSERT(condition, EX, msg)
+#endif
\ No newline at end of file
diff --git a/thirdparty/manifold/src/utilities/include/par.h b/thirdparty/manifold/src/utilities/include/par.h
new file mode 100644
index 000000000000..2049a5011d70
--- /dev/null
+++ b/thirdparty/manifold/src/utilities/include/par.h
@@ -0,0 +1,195 @@
+// Copyright 2022 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
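+
+// Usage sketch (for illustration only, not part of the upstream sources; the
+// container `data` is hypothetical): the wrappers defined below dispatch each
+// algorithm to a serial or parallel backend chosen at runtime:
+//
+//   manifold::Vec<int> data(1 << 20, 1);              // from vec.h
+//   auto policy = manifold::autoPolicy(data.size());  // Par above ~4k items
+//   manifold::for_each(policy, data.begin(), data.end(),
+//                      [](int& x) { x *= 2; });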
+
+#pragma once
+#include <thrust/copy.h>
+#include <thrust/execution_policy.h>
+#include <thrust/gather.h>
+#include <thrust/logical.h>
+#include <thrust/reduce.h>
+#include <thrust/remove.h>
+#include <thrust/scan.h>
+#include <thrust/scatter.h>
+#include <thrust/sequence.h>
+#include <thrust/transform_reduce.h>
+
+#include <algorithm>
+#include <numeric>
+#if MANIFOLD_PAR == 'T'
+#include <thrust/system/tbb/execution_policy.h>
+
+#if MANIFOLD_PAR == 'T' && TBB_INTERFACE_VERSION >= 10000 && \
+    __has_include(<pstl/glue_execution_defs.h>)
+#include <execution>
+#endif
+
+#include "tbb/tbb.h"
+#define MANIFOLD_PAR_NS tbb
+#else
+#define MANIFOLD_PAR_NS cpp
+#endif
+
+namespace manifold {
+
+enum class ExecutionPolicy {
+  Par,
+  Seq,
+};
+
+// ExecutionPolicy:
+// - Sequential for small workload,
+// - Parallel (CPU) for medium workload,
+// - GPU for large workload if available.
+inline constexpr ExecutionPolicy autoPolicy(size_t size) {
+  // some random numbers
+  if (size <= (1 << 12)) {
+    return ExecutionPolicy::Seq;
+  }
+  return ExecutionPolicy::Par;
+}
+
+#define THRUST_DYNAMIC_BACKEND_VOID(NAME)                    \
+  template <typename... Args>                                \
+  void NAME(ExecutionPolicy policy, Args... args) {          \
+    switch (policy) {                                        \
+      case ExecutionPolicy::Par:                             \
+        thrust::NAME(thrust::MANIFOLD_PAR_NS::par, args...); \
+        break;                                               \
+      case ExecutionPolicy::Seq:                             \
+        thrust::NAME(thrust::cpp::par, args...);             \
+        break;                                               \
+    }                                                        \
+  }
+
+#define THRUST_DYNAMIC_BACKEND(NAME, RET)                           \
+  template <typename Ret = RET, typename... Args>                   \
+  Ret NAME(ExecutionPolicy policy, Args... args) {                  \
+    switch (policy) {                                               \
+      case ExecutionPolicy::Par:                                    \
+        return thrust::NAME(thrust::MANIFOLD_PAR_NS::par, args...); \
+      case ExecutionPolicy::Seq:                                    \
+        break;                                                      \
+    }                                                               \
+    return thrust::NAME(thrust::cpp::par, args...);                 \
+  }
+
+#if MANIFOLD_PAR != 'T' ||                   \
+    (TBB_INTERFACE_VERSION >= 10000 &&       \
+     __has_include(<pstl/glue_execution_defs.h>))
+#if MANIFOLD_PAR == 'T'
+#define STL_DYNAMIC_BACKEND(NAME, RET)                        \
+  template <typename Ret = RET, typename... Args>             \
+  Ret NAME(ExecutionPolicy policy, Args... args) {            \
+    switch (policy) {                                         \
+      case ExecutionPolicy::Par:                              \
+        return std::NAME(std::execution::par_unseq, args...); \
+      case ExecutionPolicy::Seq:                              \
+        break;                                                \
+    }                                                         \
+    return std::NAME(args...);                                \
+  }
+#define STL_DYNAMIC_BACKEND_VOID(NAME)                 \
+  template <typename... Args>                          \
+  void NAME(ExecutionPolicy policy, Args... args) {    \
+    switch (policy) {                                  \
+      case ExecutionPolicy::Par:                       \
+        std::NAME(std::execution::par_unseq, args...); \
+        break;                                         \
+      case ExecutionPolicy::Seq:                       \
+        std::NAME(args...);                            \
+        break;                                         \
+    }                                                  \
+  }
+#else
+#define STL_DYNAMIC_BACKEND(NAME, RET)             \
+  template <typename Ret = RET, typename... Args>  \
+  Ret NAME(ExecutionPolicy policy, Args... args) { \
+    return std::NAME(args...);                     \
+  }
+#define STL_DYNAMIC_BACKEND_VOID(NAME)              \
+  template <typename... Args>                       \
+  void NAME(ExecutionPolicy policy, Args... args) { \
+    std::NAME(args...);                             \
+  }
+#endif
+
+template <typename... Args>
+void exclusive_scan(ExecutionPolicy policy, Args... args) {
+  // https://github.com/llvm/llvm-project/issues/59810
+  std::exclusive_scan(args...);
+}
+template <typename InputIterator1, typename InputIterator2,
+          typename OutputIterator, typename Predicate>
+OutputIterator copy_if(ExecutionPolicy policy, InputIterator1 first,
+                       InputIterator1 last, InputIterator2 stencil,
+                       OutputIterator result, Predicate pred) {
+  if (policy == ExecutionPolicy::Seq)
+    return thrust::copy_if(thrust::cpp::par, first, last, stencil, result,
+                           pred);
+  else
+    // note: this is not a typo, see
+    // https://github.com/NVIDIA/thrust/issues/1977
+    return thrust::copy_if(first, last, stencil, result, pred);
+}
+template <typename InputIterator1, typename OutputIterator, typename Predicate>
+OutputIterator copy_if(ExecutionPolicy policy, InputIterator1 first,
+                       InputIterator1 last, OutputIterator result,
+                       Predicate pred) {
+#if MANIFOLD_PAR == 'T'
+  if (policy == ExecutionPolicy::Seq)
+    return std::copy_if(first, last, result, pred);
+  else
+    return std::copy_if(std::execution::par_unseq, first, last, result, pred);
+#else
+  return std::copy_if(first, last, result, pred);
+#endif
+}
+
+#else
+#define STL_DYNAMIC_BACKEND(NAME, RET) THRUST_DYNAMIC_BACKEND(NAME, RET)
+#define STL_DYNAMIC_BACKEND_VOID(NAME) THRUST_DYNAMIC_BACKEND_VOID(NAME)
+
+THRUST_DYNAMIC_BACKEND_VOID(exclusive_scan)
+THRUST_DYNAMIC_BACKEND(copy_if, void)
+#endif
+
+THRUST_DYNAMIC_BACKEND_VOID(gather)
+THRUST_DYNAMIC_BACKEND_VOID(scatter)
+THRUST_DYNAMIC_BACKEND_VOID(for_each)
+THRUST_DYNAMIC_BACKEND_VOID(for_each_n)
+THRUST_DYNAMIC_BACKEND_VOID(sequence)
+STL_DYNAMIC_BACKEND_VOID(transform)
+STL_DYNAMIC_BACKEND_VOID(uninitialized_fill)
+STL_DYNAMIC_BACKEND_VOID(uninitialized_copy)
+STL_DYNAMIC_BACKEND_VOID(stable_sort)
+STL_DYNAMIC_BACKEND_VOID(fill)
+STL_DYNAMIC_BACKEND_VOID(copy)
+STL_DYNAMIC_BACKEND_VOID(inclusive_scan)
+STL_DYNAMIC_BACKEND_VOID(copy_n)
+
+// void implies that the user has to specify the return type in the template
+// argument, since we are unable to deduce it
+THRUST_DYNAMIC_BACKEND(transform_reduce, void)
+THRUST_DYNAMIC_BACKEND(gather_if, void)
+THRUST_DYNAMIC_BACKEND(reduce_by_key, void)
+STL_DYNAMIC_BACKEND(remove, void)
+STL_DYNAMIC_BACKEND(find, void)
+STL_DYNAMIC_BACKEND(find_if, void)
+STL_DYNAMIC_BACKEND(all_of, bool)
+STL_DYNAMIC_BACKEND(is_sorted, bool)
+STL_DYNAMIC_BACKEND(reduce, void)
+STL_DYNAMIC_BACKEND(count_if, int)
+STL_DYNAMIC_BACKEND(remove_if, void)
+
+}  // namespace manifold
diff --git a/thirdparty/manifold/src/utilities/include/public.h b/thirdparty/manifold/src/utilities/include/public.h
new file mode 100644
index 000000000000..b9dfa9d283af
--- /dev/null
+++ b/thirdparty/manifold/src/utilities/include/public.h
@@ -0,0 +1,696 @@
+// Copyright 2021 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
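+
+// Usage sketch (for illustration only, not part of the upstream sources):
+// the helpers below are self-contained, e.g. CCW() classifies the winding of
+// three 2D points and sind()/cosd() are exact at multiples of 90 degrees:
+//
+//   int turn = manifold::CCW({0, 0}, {1, 0}, {1, 1}, 1e-7f);  // 1 == CCW
+//   float s = manifold::sind(180.0f);                         // exactly 0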
+
+#pragma once
+#define GLM_ENABLE_EXPERIMENTAL  // needed for glm/gtx/compatibility.hpp
+#define GLM_FORCE_EXPLICIT_CTOR
+#include <glm/ext/matrix_transform.hpp>
+#include <glm/glm.hpp>
+#include <glm/gtc/constants.hpp>
+#include <glm/gtx/compatibility.hpp>
+#include <glm/gtx/rotate_vector.hpp>
+#include <limits>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+#ifdef MANIFOLD_DEBUG
+#include <iostream>
+#include <sstream>
+#include <stdexcept>
+#endif
+
+constexpr std::size_t operator""_z(unsigned long long n) { return n; }
+
+namespace manifold {
+
+constexpr float kTolerance = 1e-5;
+
+/** @defgroup Connections
+ *  @brief Move data in and out of the Manifold class.
+ *  @{
+ */
+
+/**
+ * Sine function where multiples of 90 degrees come out exact.
+ *
+ * @param x Angle in degrees.
+ */
+inline float sind(float x) {
+  if (!std::isfinite(x)) return sin(x);
+  if (x < 0.0f) return -sind(-x);
+  int quo;
+  x = remquo(fabs(x), 90.0f, &quo);
+  switch (quo % 4) {
+    case 0:
+      return sin(glm::radians(x));
+    case 1:
+      return cos(glm::radians(x));
+    case 2:
+      return -sin(glm::radians(x));
+    case 3:
+      return -cos(glm::radians(x));
+  }
+  return 0.0f;
+}
+
+/**
+ * Cosine function where multiples of 90 degrees come out exact.
+ *
+ * @param x Angle in degrees.
+ */
+inline float cosd(float x) { return sind(x + 90.0f); }
+
+/**
+ * This 4x3 matrix can be used as an input to Manifold.Transform() to turn an
+ * object. Turns along the shortest path from the given up-vector to (0, 0, 1).
+ *
+ * @param up The vector to be turned to point upwards. Length does not matter.
+ */
+inline glm::mat4x3 RotateUp(glm::vec3 up) {
+  up = glm::normalize(up);
+  glm::vec3 axis = glm::cross(up, {0, 0, 1});
+  float angle = glm::asin(glm::length(axis));
+  if (glm::dot(up, {0, 0, 1}) < 0) angle = glm::pi<float>() - angle;
+  return glm::mat4x3(glm::rotate(glm::mat4(1), angle, axis));
+}
+
+/**
+ * Determines if the three points are wound counter-clockwise, clockwise, or
+ * collinear within the specified tolerance.
+ *
+ * @param p0 First point
+ * @param p1 Second point
+ * @param p2 Third point
+ * @param tol Tolerance value for collinearity
+ * @return int, like Signum, this returns 1 for CCW, -1 for CW, and 0 if within
+ * tol of collinear.
+ */
+inline int CCW(glm::vec2 p0, glm::vec2 p1, glm::vec2 p2, float tol) {
+  glm::vec2 v1 = p1 - p0;
+  glm::vec2 v2 = p2 - p0;
+  float area = v1.x * v2.y - v1.y * v2.x;
+  float base2 = glm::max(glm::dot(v1, v1), glm::dot(v2, v2));
+  if (area * area * 4 <= base2 * tol * tol)
+    return 0;
+  else
+    return area > 0 ? 1 : -1;
+}
+
+/**
+ * Single polygon contour, wound CCW. First and last point are implicitly
+ * connected. Should ensure all input is
+ * [ε-valid](https://github.com/elalish/manifold/wiki/Manifold-Library#definition-of-%CE%B5-valid).
+ */
+using SimplePolygon = std::vector<glm::vec2>;
+
+/**
+ * Set of polygons with holes. Order of contours is arbitrary. Can contain any
+ * depth of nested holes and any number of separate polygons. Should ensure all
+ * input is
+ * [ε-valid](https://github.com/elalish/manifold/wiki/Manifold-Library#definition-of-%CE%B5-valid).
+ */
+using Polygons = std::vector<SimplePolygon>;
+
+/**
+ * The triangle-mesh input and output of this library.
+ */
+struct Mesh {
+  /// Required: The X-Y-Z positions of all vertices.
+  std::vector<glm::vec3> vertPos;
+  /// Required: The vertex indices of the three triangle corners in CCW (from
+  /// the outside) order, for each triangle.
+  std::vector<glm::ivec3> triVerts;
+  /// Optional: The X-Y-Z normal vectors of each vertex. If non-empty, must
+  /// have the same length as vertPos. If empty, these will be calculated
+  /// automatically.
+  std::vector<glm::vec3> vertNormal;
+  /// Optional: The X-Y-Z-W weighted tangent vectors for smooth Refine(). If
+  /// non-empty, must be exactly three times as long as Mesh.triVerts. Indexed
+  /// as 3 * tri + i, representing the tangent from Mesh.triVerts[tri][i] along
+  /// the CCW edge. If empty, mesh is faceted.
+  std::vector<glm::vec4> halfedgeTangent;
+  /// The absolute precision of the vertex positions, based on accrued rounding
+  /// errors. When creating a Manifold, the precision used will be the maximum
+  /// of this and a baseline precision from the size of the bounding box. Any
+  /// edge shorter than precision may be collapsed.
+  float precision = 0;
+};
+
+/**
+ * Defines which edges to sharpen and how much, for the Manifold.Smooth()
+ * constructor.
+ */
+struct Smoothness {
+  /// The halfedge index = 3 * tri + i, referring to Mesh.triVerts[tri][i].
+  int halfedge;
+  /// A value between 0 and 1, where 0 is sharp and 1 is the default, and the
+  /// curvature is interpolated between these values. The two paired halfedges
+  /// can have different values while maintaining C-1 continuity (except for
+  /// 0).
+  float smoothness;
+};
+
+/**
+ * Geometric properties of the manifold, created with Manifold.GetProperties().
+ */
+struct Properties {
+  float surfaceArea, volume;
+};
+
+struct Box {
+  glm::vec3 min = glm::vec3(std::numeric_limits<float>::infinity());
+  glm::vec3 max = glm::vec3(-std::numeric_limits<float>::infinity());
+
+  /**
+   * Default constructor is an empty box (containing no points), ready to be
+   * expanded with Union().
+   */
+  Box() {}
+
+  /**
+   * Creates a box that contains the two given points.
+   */
+  Box(const glm::vec3 p1, const glm::vec3 p2) {
+    min = glm::min(p1, p2);
+    max = glm::max(p1, p2);
+  }
+
+  /**
+   * Returns the dimensions of the Box.
+   */
+  glm::vec3 Size() const { return max - min; }
+
+  /**
+   * Returns the center point of the Box.
+   */
+  glm::vec3 Center() const { return 0.5f * (max + min); }
+
+  /**
+   * Returns the absolute-largest coordinate value of any contained
+   * point.
+   */
+  float Scale() const {
+    glm::vec3 absMax = glm::max(glm::abs(min), glm::abs(max));
+    return glm::max(absMax.x, glm::max(absMax.y, absMax.z));
+  }
+
+  /**
+   * Does this box contain (includes equal) the given point?
+   */
+  bool Contains(const glm::vec3& p) const {
+    return glm::all(glm::greaterThanEqual(p, min)) &&
+           glm::all(glm::greaterThanEqual(max, p));
+  }
+
+  /**
+   * Does this box contain (includes equal) the given box?
+   */
+  bool Contains(const Box& box) const {
+    return glm::all(glm::greaterThanEqual(box.min, min)) &&
+           glm::all(glm::greaterThanEqual(max, box.max));
+  }
+
+  /**
+   * Expand this box to include the given point.
+   */
+  void Union(const glm::vec3 p) {
+    min = glm::min(min, p);
+    max = glm::max(max, p);
+  }
+
+  /**
+   * Expand this box to include the given box.
+   */
+  Box Union(const Box& box) const {
+    Box out;
+    out.min = glm::min(min, box.min);
+    out.max = glm::max(max, box.max);
+    return out;
+  }
+
+  /**
+   * Transform this box by the given axis-aligned affine transform.
+   *
+   * Ensure the transform passed in is axis-aligned (rotations are all
+   * multiples of 90 degrees), or else the resulting bounding box will no
+   * longer bound properly.
+   */
+  Box Transform(const glm::mat4x3& transform) const {
+    Box out;
+    glm::vec3 minT = transform * glm::vec4(min, 1.0f);
+    glm::vec3 maxT = transform * glm::vec4(max, 1.0f);
+    out.min = glm::min(minT, maxT);
+    out.max = glm::max(minT, maxT);
+    return out;
+  }
+
+  /**
+   * Shift this box by the given vector.
+   */
+  Box operator+(glm::vec3 shift) const {
+    Box out;
+    out.min = min + shift;
+    out.max = max + shift;
+    return out;
+  }
+
+  /**
+   * Shift this box in-place by the given vector.
+   */
+  Box& operator+=(glm::vec3 shift) {
+    min += shift;
+    max += shift;
+    return *this;
+  }
+
+  /**
+   * Scale this box by the given vector.
+   */
+  Box operator*(glm::vec3 scale) const {
+    Box out;
+    out.min = min * scale;
+    out.max = max * scale;
+    return out;
+  }
+
+  /**
+   * Scale this box in-place by the given vector.
+   */
+  Box& operator*=(glm::vec3 scale) {
+    min *= scale;
+    max *= scale;
+    return *this;
+  }
+
+  /**
+   * Does this box overlap the one given (including equality)?
+   */
+  inline bool DoesOverlap(const Box& box) const {
+    return min.x <= box.max.x && min.y <= box.max.y && min.z <= box.max.z &&
+           max.x >= box.min.x && max.y >= box.min.y && max.z >= box.min.z;
+  }
+
+  /**
+   * Does the given point project within the XY extent of this box
+   * (including equality)?
+   */
+  inline bool DoesOverlap(glm::vec3 p) const {  // projected in z
+    return p.x <= max.x && p.x >= min.x && p.y <= max.y && p.y >= min.y;
+  }
+
+  /**
+   * Does this box have finite bounds?
+   */
+  bool IsFinite() const {
+    return glm::all(glm::isfinite(min)) && glm::all(glm::isfinite(max));
+  }
+};
+
+/**
+ * Axis-aligned rectangular bounds.
+ */
+struct Rect {
+  glm::vec2 min = glm::vec2(std::numeric_limits<float>::infinity());
+  glm::vec2 max = glm::vec2(-std::numeric_limits<float>::infinity());
+
+  /**
+   * Default constructor is an empty rectangle.
+   */
+  Rect() {}
+
+  /**
+   * Create a rectangle that contains the two given points.
+   */
+  Rect(const glm::vec2 a, const glm::vec2 b) {
+    min = glm::min(a, b);
+    max = glm::max(a, b);
+  }
+
+  /** @name Information
+   *  Details of the rectangle
+   */
+  ///@{
+
+  /**
+   * Return the dimensions of the rectangle.
+   */
+  glm::vec2 Size() const { return max - min; }
+
+  /**
+   * Return the area of the rectangle.
+   */
+  float Area() const {
+    auto sz = Size();
+    return sz.x * sz.y;
+  }
+
+  /**
+   * Returns the absolute-largest coordinate value of any contained
+   * point.
+   */
+  float Scale() const {
+    glm::vec2 absMax = glm::max(glm::abs(min), glm::abs(max));
+    return glm::max(absMax.x, absMax.y);
+  }
+
+  /**
+   * Returns the center point of the rectangle.
+   */
+  glm::vec2 Center() const { return 0.5f * (max + min); }
+
+  /**
+   * Does this rectangle contain (includes on border) the given point?
+   */
+  bool Contains(const glm::vec2& p) const {
+    return glm::all(glm::greaterThanEqual(p, min)) &&
+           glm::all(glm::greaterThanEqual(max, p));
+  }
+
+  /**
+   * Does this rectangle contain (includes equal) the given rectangle?
+   */
+  bool Contains(const Rect& rect) const {
+    return glm::all(glm::greaterThanEqual(rect.min, min)) &&
+           glm::all(glm::greaterThanEqual(max, rect.max));
+  }
+
+  /**
+   * Does this rectangle overlap the one given (including equality)?
+   */
+  bool DoesOverlap(const Rect& rect) const {
+    return min.x <= rect.max.x && min.y <= rect.max.y && max.x >= rect.min.x &&
+           max.y >= rect.min.y;
+  }
+
+  /**
+   * Is the rectangle empty (containing no space)?
+   */
+  bool IsEmpty() const { return max.y <= min.y || max.x <= min.x; }
+
+  /**
+   * Does this rectangle have finite bounds?
+   */
+  bool IsFinite() const {
+    return glm::all(glm::isfinite(min)) && glm::all(glm::isfinite(max));
+  }
+
+  ///@}
+
+  /** @name Modification
+   */
+  ///@{
+
+  /**
+   * Expand this rectangle (in place) to include the given point.
+   */
+  void Union(const glm::vec2 p) {
+    min = glm::min(min, p);
+    max = glm::max(max, p);
+  }
+
+  /**
+   * Expand this rectangle to include the given Rect.
+   */
+  Rect Union(const Rect& rect) const {
+    Rect out;
+    out.min = glm::min(min, rect.min);
+    out.max = glm::max(max, rect.max);
+    return out;
+  }
+
+  /**
+   * Shift this rectangle by the given vector.
+   */
+  Rect operator+(const glm::vec2 shift) const {
+    Rect out;
+    out.min = min + shift;
+    out.max = max + shift;
+    return out;
+  }
+
+  /**
+   * Shift this rectangle in-place by the given vector.
+   */
+  Rect& operator+=(const glm::vec2 shift) {
+    min += shift;
+    max += shift;
+    return *this;
+  }
+
+  /**
+   * Scale this rectangle by the given vector.
+   */
+  Rect operator*(const glm::vec2 scale) const {
+    Rect out;
+    out.min = min * scale;
+    out.max = max * scale;
+    return out;
+  }
+
+  /**
+   * Scale this rectangle in-place by the given vector.
+   */
+  Rect& operator*=(const glm::vec2 scale) {
+    min *= scale;
+    max *= scale;
+    return *this;
+  }
+
+  /**
+   * Transform the rectangle by the given axis-aligned affine transform.
+   *
+   * Ensure the transform passed in is axis-aligned (rotations are all
+   * multiples of 90 degrees), or else the resulting rectangle will no longer
+   * bound properly.
+   */
+  Rect Transform(const glm::mat3x2& m) const {
+    Rect rect;
+    rect.min = m * glm::vec3(min, 1);
+    rect.max = m * glm::vec3(max, 1);
+    return rect;
+  }
+  ///@}
+};
+/** @} */
+
+/** @addtogroup Core
+ *  @{
+ */
+
+/**
+ * Boolean operation type: Add (Union), Subtract (Difference), and Intersect.
+ */
+enum class OpType { Add, Subtract, Intersect };
+
+/**
+ * These static properties control how circular shapes are quantized by
+ * default on construction. If circularSegments is specified, it takes
+ * precedence. If it is zero, then instead the minimum is used of the segments
+ * calculated based on edge length and angle, rounded up to the nearest
+ * multiple of four. To get numbers not divisible by four, circularSegments
+ * must be specified.
+ */
+class Quality {
+ private:
+  inline static int circularSegments_ = 0;
+  inline static float circularAngle_ = 10.0f;
+  inline static float circularEdgeLength_ = 1.0f;
+
+ public:
+  /**
+   * Sets an angle constraint on the default number of circular segments for
+   * the CrossSection::Circle(), Manifold::Cylinder(), Manifold::Sphere(), and
+   * Manifold::Revolve() constructors. The number of segments will be rounded
+   * up to the nearest multiple of four.
+   *
+   * @param angle The minimum angle in degrees between consecutive segments.
+   * The angle will increase if the segments hit the minimum edge length.
+   * Default is 10 degrees.
+   */
+  static void SetMinCircularAngle(float angle) {
+    if (angle <= 0) return;
+    circularAngle_ = angle;
+  }
+
+  /**
+   * Sets a length constraint on the default number of circular segments for
+   * the CrossSection::Circle(), Manifold::Cylinder(), Manifold::Sphere(), and
+   * Manifold::Revolve() constructors. The number of segments will be rounded
+   * up to the nearest multiple of four.
+   *
+   * @param length The minimum length of segments. The length will
+   * increase if the segments hit the minimum angle. Default is 1.0.
+   */
+  static void SetMinCircularEdgeLength(float length) {
+    if (length <= 0) return;
+    circularEdgeLength_ = length;
+  }
+
+  /**
+   * Sets the default number of circular segments for the
+   * CrossSection::Circle(), Manifold::Cylinder(), Manifold::Sphere(), and
+   * Manifold::Revolve() constructors.
+   * Overrides the edge length and angle constraints and sets the number of
+   * segments to exactly this value.
+   *
+   * @param number Number of circular segments. Default is 0, meaning no
+   * constraint is applied.
+   */
+  static void SetCircularSegments(int number) {
+    if (number < 3 && number != 0) return;
+    circularSegments_ = number;
+  }
+
+  /**
+   * Determine the result of the SetMinCircularAngle(),
+   * SetMinCircularEdgeLength(), and SetCircularSegments() defaults.
+   *
+   * @param radius For a given radius of circle, determine how many default
+   * segments there will be.
+   */
+  static int GetCircularSegments(float radius) {
+    if (circularSegments_ > 0) return circularSegments_;
+    int nSegA = 360.0f / circularAngle_;
+    int nSegL = 2.0f * radius * glm::pi<float>() / circularEdgeLength_;
+    int nSeg = fmin(nSegA, nSegL) + 3;
+    nSeg -= nSeg % 4;
+    return std::max(nSeg, 3);
+  }
+};
+/** @} */
+
+/** @defgroup Debug
+ *  @brief Debugging features
+ *
+ *  The features require compiler flags to be enabled. Assertions are enabled
+ *  with the MANIFOLD_DEBUG flag and then controlled with ExecutionParams.
+ *  Exceptions are only thrown if the MANIFOLD_EXCEPTIONS flag is set. Import
+ *  and Export of 3D models is only supported with the MANIFOLD_EXPORT flag,
+ *  which also requires linking in the Assimp dependency.
+ *  @{
+ */
+
+/** @defgroup Exceptions
+ *  @brief Custom Exceptions
+ *  @{
+ */
+#ifdef MANIFOLD_DEBUG
+struct userErr : public virtual std::runtime_error {
+  using std::runtime_error::runtime_error;
+};
+struct topologyErr : public virtual std::runtime_error {
+  using std::runtime_error::runtime_error;
+};
+struct geometryErr : public virtual std::runtime_error {
+  using std::runtime_error::runtime_error;
+};
+using logicErr = std::logic_error;
+#endif
+/** @} */
+
+/**
+ * Global parameters that control debugging output. Only has an
+ * effect when compiled with the MANIFOLD_DEBUG flag.
+ */
+struct ExecutionParams {
+  /// Perform extra sanity checks and assertions on the intermediate data
+  /// structures.
+  bool intermediateChecks = false;
+  /// Verbose output primarily of the Boolean, including timing info and vector
+  /// sizes.
+  bool verbose = false;
+  /// If processOverlaps is false, a geometric check will be performed to
+  /// assert all triangles are CCW.
+  bool processOverlaps = true;
+  /// Suppresses printed errors regarding CW triangles. Has no effect if
+  /// processOverlaps is true.
+  bool suppressErrors = false;
+  /// Deterministic outputs. Will disable some parallel optimizations.
+  bool deterministic = false;
+  /// Perform optional but recommended triangle cleanups in SimplifyTopology()
+  bool cleanupTriangles = true;
+};
+
+#ifdef MANIFOLD_DEBUG
+
+template <typename T>
+inline std::ostream& operator<<(std::ostream& stream, const glm::tvec2<T>& v) {
+  return stream << "x = " << v.x << ", y = " << v.y;
+}
+
+template <typename T>
+inline std::ostream& operator<<(std::ostream& stream, const glm::tvec3<T>& v) {
+  return stream << "x = " << v.x << ", y = " << v.y << ", z = " << v.z;
+}
+
+template <typename T>
+inline std::ostream& operator<<(std::ostream& stream, const glm::tvec4<T>& v) {
+  return stream << "x = " << v.x << ", y = " << v.y << ", z = " << v.z
+                << ", w = " << v.w;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const glm::mat3& mat) {
+  glm::mat3 tam = glm::transpose(mat);
+  return stream << tam[0] << std::endl
+                << tam[1] << std::endl
+                << tam[2] << std::endl;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const glm::mat4x3& mat) {
+  glm::mat3x4 tam = glm::transpose(mat);
+  return stream << tam[0] << std::endl
+                << tam[1] << std::endl
+                << tam[2] << std::endl;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const Box& box) {
+  return stream << "min: " << box.min << ", "
+                << "max: " << box.max;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const Rect& box) {
+  return stream << "min: " << box.min << ", "
+                << "max: " << box.max;
+}
+
+/**
+ * Print the contents of this vector to standard output. Only exists if
+ * compiled with the MANIFOLD_DEBUG flag.
+ */
+template <typename T>
+void Dump(const std::vector<T>& vec) {
+  std::cout << "Vec = " << std::endl;
+  for (int i = 0; i < vec.size(); ++i) {
+    std::cout << i << ", " << vec[i] << ", " << std::endl;
+  }
+  std::cout << std::endl;
+}
+
+template <typename T>
+void Diff(const std::vector<T>& a, const std::vector<T>& b) {
+  std::cout << "Diff = " << std::endl;
+  if (a.size() != b.size()) {
+    std::cout << "a and b must have the same length, aborting Diff"
+              << std::endl;
+    return;
+  }
+  for (int i = 0; i < a.size(); ++i) {
+    if (a[i] != b[i])
+      std::cout << i << ": " << a[i] << ", " << b[i] << std::endl;
+  }
+  std::cout << std::endl;
+}
+/** @} */
+#endif
+}  // namespace manifold
+
+#undef HOST_DEVICE
diff --git a/thirdparty/manifold/src/utilities/include/sparse.h b/thirdparty/manifold/src/utilities/include/sparse.h
new file mode 100644
index 000000000000..05a711c44e6a
--- /dev/null
+++ b/thirdparty/manifold/src/utilities/include/sparse.h
@@ -0,0 +1,201 @@
+// Copyright 2021 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
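+
+// Usage sketch (for illustration only, not part of the upstream sources):
+// SparseIndices packs each (p, q) index pair into one 64-bit key so a whole
+// batch can be sorted and deduplicated as a flat buffer:
+//
+//   manifold::SparseIndices pairs;
+//   pairs.Add(3, 7);
+//   pairs.Add(1, 2);
+//   pairs.Add(3, 7);
+//   pairs.Unique();                // sorts by (p << 32) | q, drops the dup
+//   int p0 = pairs.Get(0, false);  // == 1
+//   int q0 = pairs.Get(0, true);   // == 2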
+
+#pragma once
+#include <math.h>
+
+#include "optional_assert.h"
+#include "par.h"
+#include "public.h"
+#include "utils.h"
+#include "vec.h"
+
+namespace manifold {
+
+/** @ingroup Private */
+class SparseIndices {
+  // sparse indices where {p1: q1, p2: q2, ...} are laid out as
+  // p1 q1 p2 q2 or q1 p1 q2 p2, depending on endianness
+  // such that the indices are sorted by (p << 32) | q
+ public:
+#if defined(__BYTE_ORDER) && __BYTE_ORDER == __BIG_ENDIAN ||                 \
+    defined(__BIG_ENDIAN__) || defined(__ARMEB__) || defined(__THUMBEB__) || \
+    defined(__AARCH64EB__) || defined(_MIBSEB) || defined(__MIBSEB) ||       \
+    defined(__MIBSEB__)
+  static constexpr size_t pOffset = 0;
+#elif defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN ||          \
+    defined(__LITTLE_ENDIAN__) || defined(__ARMEL__) ||                    \
+    defined(__THUMBEL__) || defined(__AARCH64EL__) || defined(_MIPSEL) ||  \
+    defined(__MIPSEL) || defined(__MIPSEL__) || defined(__EMSCRIPTEN__) || \
+    defined(_WIN32)
+  static constexpr size_t pOffset = 1;
+#else
+#error "unknown architecture"
+#endif
+  static constexpr int64_t EncodePQ(int p, int q) {
+    return (int64_t(p) << 32) | q;
+  }
+
+  SparseIndices() = default;
+  SparseIndices(size_t size) { data_ = Vec<char>(size * sizeof(int64_t)); }
+
+  size_t size() const { return data_.size() / sizeof(int64_t); }
+
+  Vec<int> Copy(bool use_q) const {
+    Vec<int> out(size());
+    size_t offset = pOffset;
+    if (use_q) offset = 1 - offset;
+    const int* p = ptr();
+    for_each(autoPolicy(out.size()), countAt(0_z), countAt(out.size()),
+             [&](size_t i) { out[i] = p[i * 2 + offset]; });
+    return out;
+  }
+
+  void Sort() {
+    VecView<int64_t> view = AsVec64();
+    stable_sort(autoPolicy(size()), view.begin(), view.end());
+  }
+
+  void Resize(size_t size) { data_.resize(size * sizeof(int64_t), -1); }
+
+  inline int& Get(size_t i, bool use_q) {
+    if (use_q)
+      return ptr()[2 * i + 1 - pOffset];
+    else
+      return ptr()[2 * i + pOffset];
+  }
+
+  inline int Get(size_t i, bool use_q) const {
+    if (use_q)
+      return ptr()[2 * i + 1 - pOffset];
+    else
+      return ptr()[2 * i + pOffset];
+  }
+
+  inline int64_t GetPQ(size_t i) const {
+    VecView<const int64_t> view = AsVec64();
+    return view[i];
+  }
+
+  inline void Set(size_t i, int p, int q) {
+    VecView<int64_t> view = AsVec64();
+    view[i] = EncodePQ(p, q);
+  }
+
+  inline void SetPQ(size_t i, int64_t pq) {
+    VecView<int64_t> view = AsVec64();
+    view[i] = pq;
+  }
+
+  VecView<int64_t> AsVec64() {
+    return VecView<int64_t>(reinterpret_cast<int64_t*>(data_.data()),
+                            data_.size() / sizeof(int64_t));
+  }
+
+  VecView<const int64_t> AsVec64() const {
+    return VecView<const int64_t>(
+        reinterpret_cast<const int64_t*>(data_.data()),
+        data_.size() / sizeof(int64_t));
+  }
+
+  VecView<int32_t> AsVec32() {
+    return VecView<int32_t>(reinterpret_cast<int32_t*>(data_.data()),
+                            data_.size() / sizeof(int32_t));
+  }
+
+  VecView<const int32_t> AsVec32() const {
+    return VecView<const int32_t>(
+        reinterpret_cast<const int32_t*>(data_.data()),
+        data_.size() / sizeof(int32_t));
+  }
+
+  inline void Add(int p, int q) {
+    for (int i = 0; i < sizeof(int64_t); ++i) data_.push_back(-1);
+    Set(size() - 1, p, q);
+  }
+
+  void Unique() {
+    Sort();
+    VecView<int64_t> view = AsVec64();
+    size_t newSize = std::unique(view.begin(), view.end()) - view.begin();
+    Resize(newSize);
+  }
+
+  size_t RemoveZeros(Vec<int>& S) {
+    ASSERT(S.size() == size(), userErr,
+           "Different number of values than indices!");
+    VecView<int64_t> view = AsVec64();
+    auto zBegin = zip(S.begin(), view.begin());
+    auto zEnd = zip(S.end(), view.end());
+    size_t size =
+        remove_if(autoPolicy(S.size()), zBegin, zEnd,
+                  [](thrust::tuple<int, int64_t> x) {
+                    return thrust::get<0>(x) == 0;
+                  }) -
+        zBegin;
+    S.resize(size, -1);
+    Resize(size);
+    return size;
+  }
+
+  template <typename T>
+  struct firstNonFinite {
+    bool NotFinite(float v) const { return !isfinite(v); }
+    bool NotFinite(glm::vec2 v) const { return !isfinite(v[0]); }
+    bool NotFinite(glm::vec3 v) const { return !isfinite(v[0]); }
+    bool NotFinite(glm::vec4 v) const { return !isfinite(v[0]); }
+
+    bool operator()(thrust::tuple<T, int, int64_t> x) const {
+      bool result = NotFinite(thrust::get<0>(x));
+      return result;
+    }
+  };
+
+  template <typename T>
+  size_t KeepFinite(Vec<T>& v, Vec<int>& x) {
+    ASSERT(x.size() == size(), userErr,
+           "Different number of values than indices!");
+    VecView<int64_t> view = AsVec64();
+    auto zBegin = zip(v.begin(), x.begin(), view.begin());
+    auto zEnd = zip(v.end(), x.end(), view.end());
+    size_t size = remove_if(autoPolicy(v.size()), zBegin, zEnd,
+                            firstNonFinite<T>()) -
+                  zBegin;
+    v.resize(size);
+    x.resize(size);
+    Resize(size);
+    return size;
+  }
+
+#ifdef MANIFOLD_DEBUG
+  void Dump() const {
+    std::cout << "SparseIndices = " << std::endl;
+    const int* p = ptr();
+    for (size_t i = 0; i < size(); ++i) {
+      std::cout << i << ", p = " << Get(i, false) << ", q = " << Get(i, true)
+                << std::endl;
+    }
+    std::cout << std::endl;
+  }
+#endif
+
+ private:
+  Vec<char> data_;
+  inline int* ptr() { return reinterpret_cast<int32_t*>(data_.data()); }
+  inline const int* ptr() const {
+    return reinterpret_cast<const int32_t*>(data_.data());
+  }
+};
+
+}  // namespace manifold
diff --git a/thirdparty/manifold/src/utilities/include/svd.h b/thirdparty/manifold/src/utilities/include/svd.h
new file mode 100644
index 000000000000..243d898bc9cb
--- /dev/null
+++ b/thirdparty/manifold/src/utilities/include/svd.h
@@ -0,0 +1,310 @@
+// MIT License
+
+// Copyright (c) 2019 wi-re
+// Copyright 2023 The Manifold Authors.
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+// Modified from https://github.com/wi-re/tbtSVD, removing CUDA dependence and
+// approximate inverse square roots.
+
+#include <math.h>
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <glm/glm.hpp>
+
+namespace {
+// Constants used for calculation of Givens quaternions
+inline constexpr float _gamma = 5.828427124f;   // sqrt(8)+3;
+inline constexpr float _cStar = 0.923879532f;   // cos(pi/8)
+inline constexpr float _sStar = 0.3826834323f;  // sin(pi/8)
+// Threshold value
+inline constexpr float _SVD_EPSILON = 1e-6f;
+// Iteration counts for Jacobi Eigen Analysis, influences precision
+inline constexpr int JACOBI_STEPS = 12;
+
+// Helper function used to swap X with Y and Y with X if c == true
+inline void CondSwap(bool c, float& X, float& Y) {
+  float Z = X;
+  X = c ? Y : X;
+  Y = c ? Z : Y;
+}
+// Helper function used to swap X with Y and Y with -X if c == true
+inline void CondNegSwap(bool c, float& X, float& Y) {
+  float Z = -X;
+  X = c ? Y : X;
+  Y = c ? Z : Y;
+}
+// A simple symmetric 3x3 Matrix class (contains no storage for (0, 1), (0, 2)
+// and (1, 2))
+struct Symmetric3x3 {
+  float m_00 = 1.f;
+  float m_10 = 0.f, m_11 = 1.f;
+  float m_20 = 0.f, m_21 = 0.f, m_22 = 1.f;
+
+  Symmetric3x3(float a11 = 1.f, float a21 = 0.f, float a22 = 1.f,
+               float a31 = 0.f, float a32 = 0.f, float a33 = 1.f)
+      : m_00(a11), m_10(a21), m_11(a22), m_20(a31), m_21(a32), m_22(a33) {}
+  Symmetric3x3(glm::mat3 o)
+      : m_00(o[0][0]),
+        m_10(o[0][1]),
+        m_11(o[1][1]),
+        m_20(o[0][2]),
+        m_21(o[1][2]),
+        m_22(o[2][2]) {}
+};
+// Helper struct to store 2 floats to avoid OUT parameters on functions
+struct Givens {
+  float ch = _cStar;
+  float sh = _sStar;
+};
+// Helper struct to store 2 Matrices to avoid OUT parameters on functions
+struct QR {
+  glm::mat3 Q, R;
+};
+// Calculates the squared norm of the vector.
+inline float Dist2(glm::vec3 v) { return glm::dot(v, v); }
+// For an explanation of the math see
+// http://pages.cs.wisc.edu/~sifakis/papers/SVD_TR1690.pdf Computing the
+// Singular Value Decomposition of 3 x 3 matrices with minimal branching and
+// elementary floating point operations. See Algorithm 2 in the reference.
+// Given a matrix A this function returns the Givens quaternion (x and w
+// component, y and z are 0)
+inline Givens ApproximateGivensQuaternion(Symmetric3x3& A) {
+  Givens g{2.f * (A.m_00 - A.m_11), A.m_10};
+  bool b = _gamma * g.sh * g.sh < g.ch * g.ch;
+  float w = 1.f / sqrt(fmaf(g.ch, g.ch, g.sh * g.sh));
+  if (w != w) b = 0;  // w is NaN when the norm above is zero
+  return Givens{b ? w * g.ch : (float)_cStar, b ? w * g.sh : (float)_sStar};
+}
+// Function used to apply a Givens rotation S. Calculates the weights and
+// updates the quaternion to contain the cumulative rotation
+inline void JacobiConjugation(const int32_t x, const int32_t y,
+                              const int32_t z, Symmetric3x3& S, glm::vec4& q) {
+  auto g = ApproximateGivensQuaternion(S);
+  float scale = 1.f / fmaf(g.ch, g.ch, g.sh * g.sh);
+  float a = fmaf(g.ch, g.ch, -g.sh * g.sh) * scale;
+  float b = 2.f * g.sh * g.ch * scale;
+  Symmetric3x3 _S = S;
+  // perform conjugation S = Q'*S*Q
+  S.m_00 = fmaf(a, fmaf(a, _S.m_00, b * _S.m_10),
+                b * (fmaf(a, _S.m_10, b * _S.m_11)));
+  S.m_10 = fmaf(a, fmaf(-b, _S.m_00, a * _S.m_10),
+                b * (fmaf(-b, _S.m_10, a * _S.m_11)));
+  S.m_11 = fmaf(-b, fmaf(-b, _S.m_00, a * _S.m_10),
+                a * (fmaf(-b, _S.m_10, a * _S.m_11)));
+  S.m_20 = fmaf(a, _S.m_20, b * _S.m_21);
+  S.m_21 = fmaf(-b, _S.m_20, a * _S.m_21);
+  S.m_22 = _S.m_22;
+  // update cumulative rotation qV
+  glm::vec3 tmp = g.sh * glm::vec3(q);
+  g.sh *= q[3];
+  // (x,y,z) corresponds to ((0,1,2),(1,2,0),(2,0,1)) for (p,q) =
+  // ((0,1),(1,2),(0,2))
+  q[z] = fmaf(q[z], g.ch, g.sh);
+  q[3] = fmaf(q[3], g.ch, -tmp[z]);  // w
+  q[x] = fmaf(q[x], g.ch, tmp[y]);
+  q[y] = fmaf(q[y], g.ch, -tmp[x]);
+  // re-arrange matrix for next iteration
+  _S.m_00 = S.m_11;
+  _S.m_10 = S.m_21;
+  _S.m_11 = S.m_22;
+  _S.m_20 = S.m_10;
+  _S.m_21 = S.m_20;
+  _S.m_22 = S.m_00;
+  S.m_00 = _S.m_00;
+  S.m_10 = _S.m_10;
+  S.m_11 = _S.m_11;
+  S.m_20 = _S.m_20;
+  S.m_21 = _S.m_21;
+  S.m_22 = _S.m_22;
+}
+// Function used to contain the Givens permutations and the loop of the jacobi
+// steps controlled by JACOBI_STEPS. Returns the quaternion q containing the
+// cumulative result used to reconstruct S
+inline glm::mat3 JacobiEigenAnalysis(Symmetric3x3 S) {
+  glm::vec4 q(0, 0, 0, 1);
+  for (int32_t i = 0; i < JACOBI_STEPS; i++) {
+    JacobiConjugation(0, 1, 2, S, q);
+    JacobiConjugation(1, 2, 0, S, q);
+    JacobiConjugation(2, 0, 1, S, q);
+  }
+  return glm::mat3(1.f - 2.f * (fmaf(q.y, q.y, q.z * q.z)),  //
+                   2.f * fmaf(q.x, q.y, +q.w * q.z),         //
+                   2.f * fmaf(q.x, q.z, -q.w * q.y),         //
+                   2 * fmaf(q.x, q.y, -q.w * q.z),           //
+                   1 - 2 * fmaf(q.x, q.x, q.z * q.z),        //
+                   2 * fmaf(q.y, q.z, q.w * q.x),            //
+                   2 * fmaf(q.x, q.z, q.w * q.y),            //
+                   2 * fmaf(q.y, q.z, -q.w * q.x),           //
+                   1 - 2 * fmaf(q.x, q.x, q.y * q.y));
+}
+// Implementation of Algorithm 3
+inline void SortSingularValues(glm::mat3& B, glm::mat3& V) {
+  float rho1 = Dist2(B[0]);
+  float rho2 = Dist2(B[1]);
+  float rho3 = Dist2(B[2]);
+  bool c;
+  c = rho1 < rho2;
+  CondNegSwap(c, B[0][0], B[1][0]);
+  CondNegSwap(c, V[0][0], V[1][0]);
+  CondNegSwap(c, B[0][1], B[1][1]);
+  CondNegSwap(c, V[0][1], V[1][1]);
+  CondNegSwap(c, B[0][2], B[1][2]);
+  CondNegSwap(c, V[0][2], V[1][2]);
+  CondSwap(c, rho1, rho2);
+  c = rho1 < rho3;
+  CondNegSwap(c, B[0][0], B[2][0]);
+  CondNegSwap(c, V[0][0], V[2][0]);
+  CondNegSwap(c, B[0][1], B[2][1]);
+  CondNegSwap(c, V[0][1], V[2][1]);
+  CondNegSwap(c, B[0][2], B[2][2]);
+  CondNegSwap(c, V[0][2], V[2][2]);
+  CondSwap(c, rho1, rho3);
+  c = rho2 < rho3;
+  CondNegSwap(c, B[1][0], B[2][0]);
+  CondNegSwap(c, V[1][0], V[2][0]);
+  CondNegSwap(c, B[1][1], B[2][1]);
+  CondNegSwap(c, V[1][1], V[2][1]);
+  CondNegSwap(c, B[1][2], B[2][2]);
+  CondNegSwap(c, V[1][2], V[2][2]);
+}
+// Implementation of Algorithm 4
+inline Givens QRGivensQuaternion(float a1, float a2) {
+  // a1 = pivot point on diagonal
+  // a2 = lower triangular entry we want to annihilate
+  float epsilon = (float)_SVD_EPSILON;
+  float rho = sqrt(fmaf(a1, a1, +a2 * a2));
+  Givens g{fabsf(a1) + fmaxf(rho, epsilon), rho > epsilon ? a2 : 0};
+  bool b = a1 < 0.f;
+  CondSwap(b, g.sh, g.ch);
+  float w = 1.f / sqrt(fmaf(g.ch, g.ch, g.sh * g.sh));
+  g.ch *= w;
+  g.sh *= w;
+  return g;
+}
+// Implements a QR decomposition of a Matrix, see Sec 4.2
+inline QR QRDecomposition(glm::mat3& B) {
+  glm::mat3 Q, R;
+  // first Givens rotation (ch,0,0,sh)
+  auto g1 = QRGivensQuaternion(B[0][0], B[0][1]);
+  auto a = fmaf(-2.f, g1.sh * g1.sh, 1.f);
+  auto b = 2.f * g1.ch * g1.sh;
+  // apply B = Q' * B
+  R[0][0] = fmaf(a, B[0][0], b * B[0][1]);
+  R[1][0] = fmaf(a, B[1][0], b * B[1][1]);
+  R[2][0] = fmaf(a, B[2][0], b * B[2][1]);
+  R[0][1] = fmaf(-b, B[0][0], a * B[0][1]);
+  R[1][1] = fmaf(-b, B[1][0], a * B[1][1]);
+  R[2][1] = fmaf(-b, B[2][0], a * B[2][1]);
+  R[0][2] = B[0][2];
+  R[1][2] = B[1][2];
+  R[2][2] = B[2][2];
+  // second Givens rotation (ch,0,-sh,0)
+  auto g2 = QRGivensQuaternion(R[0][0], R[0][2]);
+  a = fmaf(-2.f, g2.sh * g2.sh, 1.f);
+  b = 2.f * g2.ch * g2.sh;
+  // apply B = Q' * B;
+  B[0][0] = fmaf(a, R[0][0], b * R[0][2]);
+  B[1][0] = fmaf(a, R[1][0], b * R[1][2]);
+  B[2][0] = fmaf(a, R[2][0], b * R[2][2]);
+  B[0][1] = R[0][1];
+  B[1][1] = R[1][1];
+  B[2][1] = R[2][1];
+  B[0][2] = fmaf(-b, R[0][0], a * R[0][2]);
+  B[1][2] = fmaf(-b, R[1][0], a * R[1][2]);
+  B[2][2] = fmaf(-b, R[2][0], a * R[2][2]);
+  // third Givens rotation (ch,sh,0,0)
+  auto g3 = QRGivensQuaternion(B[1][1], B[1][2]);
+  a = fmaf(-2.f, g3.sh * g3.sh, 1.f);
+  b = 2.f * g3.ch * g3.sh;
+  // R is now set to desired value
+  R[0][0] = B[0][0];
+  R[1][0] = B[1][0];
+  R[2][0] = B[2][0];
+  R[0][1] = fmaf(a, B[0][1], b * B[0][2]);
+  R[1][1] = fmaf(a, B[1][1], b * B[1][2]);
+  R[2][1] = fmaf(a, B[2][1], b * B[2][2]);
+  R[0][2] = fmaf(-b, B[0][1], a * B[0][2]);
+  R[1][2] = fmaf(-b, B[1][1], a * B[1][2]);
+  R[2][2] = fmaf(-b, B[2][1], a * B[2][2]);
+  // construct the cumulative rotation Q=Q1 * Q2 * Q3
+  // the number of floating point operations for three quaternion
+  // multiplications is more or less comparable to the explicit form of the
+  // joined matrix. certainly more memory-efficient!
+  auto sh12 = 2.f * fmaf(g1.sh, g1.sh, -0.5f);
+  auto sh22 = 2.f * fmaf(g2.sh, g2.sh, -0.5f);
+  auto sh32 = 2.f * fmaf(g3.sh, g3.sh, -0.5f);
+  Q[0][0] = sh12 * sh22;
+  Q[1][0] = fmaf(4.f * g2.ch * g3.ch, sh12 * g2.sh * g3.sh,
+                 2.f * g1.ch * g1.sh * sh32);
+  Q[2][0] = fmaf(4.f * g1.ch * g3.ch, g1.sh * g3.sh,
+                 -2.f * g2.ch * sh12 * g2.sh * sh32);
+
+  Q[0][1] = -2.f * g1.ch * g1.sh * sh22;
+  Q[1][1] =
+      fmaf(-8.f * g1.ch * g2.ch * g3.ch, g1.sh * g2.sh * g3.sh, sh12 * sh32);
+  Q[2][1] = fmaf(
+      -2.f * g3.ch, g3.sh,
+      4.f * g1.sh * fmaf(g3.ch * g1.sh, g3.sh, g1.ch * g2.ch * g2.sh * sh32));
+
+  Q[0][2] = 2.f * g2.ch * g2.sh;
+  Q[1][2] = -2.f * g3.ch * sh22 * g3.sh;
+  Q[2][2] = sh22 * sh32;
+  return QR{Q, R};
+}
+}  // namespace
+
+namespace manifold {
+/** @addtogroup Connections
+ *  @{
+ */
+
+/**
+ * The three matrices of a Singular Value Decomposition.
+ */
+struct SVDSet {
+  glm::mat3 U, S, V;
+};
+
+/**
+ * Returns the Singular Value Decomposition of A: A = U * S *
+ * glm::transpose(V).
+ *
+ * @param A The matrix to decompose.
+ */
+inline SVDSet SVD(glm::mat3 A) {
+  glm::mat3 V = JacobiEigenAnalysis(glm::transpose(A) * A);
+  auto B = A * V;
+  SortSingularValues(B, V);
+  QR qr = QRDecomposition(B);
+  return SVDSet{qr.Q, qr.R, V};
+}
+
+/**
+ * Returns the largest singular value of A.
+ *
+ * @param A The matrix to measure.
+ */
+inline float SpectralNorm(glm::mat3 A) {
+  SVDSet usv = SVD(A);
+  return usv.S[0][0];
+}
+/** @} */
+}  // namespace manifold
diff --git a/thirdparty/manifold/src/utilities/include/tri_dist.h b/thirdparty/manifold/src/utilities/include/tri_dist.h
new file mode 100644
index 000000000000..f768f7f90cf3
--- /dev/null
+++ b/thirdparty/manifold/src/utilities/include/tri_dist.h
@@ -0,0 +1,225 @@
+// Copyright 2024 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <array>
+#include <glm/glm.hpp>
+
+namespace manifold {
+
+// From NVIDIA-Omniverse PhysX - BSD 3-Clause "New" or "Revised" License
+// https://github.com/NVIDIA-Omniverse/PhysX/blob/main/LICENSE.md
+// https://github.com/NVIDIA-Omniverse/PhysX/blob/main/physx/source/geomutils/src/sweep/GuSweepCapsuleCapsule.cpp
+// With minor modifications
+
+/**
+ * Returns the distance between two line segments.
+ *
+ * @param[out] x Closest point on line segment pa.
+ * @param[out] y Closest point on line segment qb.
+ * @param[in] p One endpoint of the first line segment.
+ * @param[in] a Other endpoint of the first line segment.
+ * @param[in] q One endpoint of the second line segment.
+ * @param[in] b Other endpoint of the second line segment.
+ */
+inline void EdgeEdgeDist(glm::vec3& x, glm::vec3& y,  // closest points
+                         const glm::vec3& p,
+                         const glm::vec3& a,  // seg 1 origin, vector
+                         const glm::vec3& q,
+                         const glm::vec3& b)  // seg 2 origin, vector
+{
+  const glm::vec3 T = q - p;
+  const float ADotA = glm::dot(a, a);
+  const float BDotB = glm::dot(b, b);
+  const float ADotB = glm::dot(a, b);
+  const float ADotT = glm::dot(a, T);
+  const float BDotT = glm::dot(b, T);
+
+  // t parameterizes ray (p, a)
+  // u parameterizes ray (q, b)
+
+  // Compute t for the closest point on ray (p, a) to ray (q, b)
+  const float Denom = ADotA * BDotB - ADotB * ADotB;
+
+  float t;  // We will clamp result so t is on the segment (p, a)
+  t = Denom != 0.0f
+          ? glm::clamp((ADotT * BDotB - BDotT * ADotB) / Denom, 0.0f, 1.0f)
+          : 0.0f;
+
+  // find u for point on ray (q, b) closest to point at t
+  float u;
+  if (BDotB != 0.0f) {
+    u = (t * ADotB - BDotT) / BDotB;
+
+    // if u is on segment (q, b), t and u correspond to closest points,
+    // otherwise, clamp u, recompute and clamp t
+    if (u < 0.0f) {
+      u = 0.0f;
+      t = ADotA != 0.0f ? glm::clamp(ADotT / ADotA, 0.0f, 1.0f) : 0.0f;
+    } else if (u > 1.0f) {
+      u = 1.0f;
+      t = ADotA != 0.0f ? glm::clamp((ADotB + ADotT) / ADotA, 0.0f, 1.0f)
+                        : 0.0f;
+    }
+  } else {
+    u = 0.0f;
+    t = ADotA != 0.0f ? glm::clamp(ADotT / ADotA, 0.0f, 1.0f) : 0.0f;
+  }
+  x = p + a * t;
+  y = q + b * u;
+}
+
+// From NVIDIA-Omniverse PhysX - BSD 3-Clause "New" or "Revised" License
+// https://github.com/NVIDIA-Omniverse/PhysX/blob/main/LICENSE.md
+// https://github.com/NVIDIA-Omniverse/PhysX/blob/main/physx/source/geomutils/src/distance/GuDistanceTriangleTriangle.cpp
+// With minor modifications
+
+/**
+ * Returns the minimum squared distance between two triangles.
+ *
+ * @param p First triangle.
+ * @param q Second triangle. + */ +inline float DistanceTriangleTriangleSquared( + const std::array& p, const std::array& q) { + std::array Sv; + Sv[0] = p[1] - p[0]; + Sv[1] = p[2] - p[1]; + Sv[2] = p[0] - p[2]; + + std::array Tv; + Tv[0] = q[1] - q[0]; + Tv[1] = q[2] - q[1]; + Tv[2] = q[0] - q[2]; + + bool shown_disjoint = false; + + float mindd = std::numeric_limits::max(); + + for (uint32_t i = 0; i < 3; i++) { + for (uint32_t j = 0; j < 3; j++) { + glm::vec3 cp; + glm::vec3 cq; + EdgeEdgeDist(cp, cq, p[i], Sv[i], q[j], Tv[j]); + const glm::vec3 V = cq - cp; + const float dd = glm::dot(V, V); + + if (dd <= mindd) { + mindd = dd; + + uint32_t id = i + 2; + if (id >= 3) id -= 3; + glm::vec3 Z = p[id] - cp; + float a = glm::dot(Z, V); + id = j + 2; + if (id >= 3) id -= 3; + Z = q[id] - cq; + float b = glm::dot(Z, V); + + if ((a <= 0.0f) && (b >= 0.0f)) { + return glm::dot(V, V); + }; + + if (a <= 0.0f) + a = 0.0f; + else if (b > 0.0f) + b = 0.0f; + + if ((mindd - a + b) > 0.0f) shown_disjoint = true; + } + } + } + + glm::vec3 Sn = glm::cross(Sv[0], Sv[1]); + float Snl = glm::dot(Sn, Sn); + + if (Snl > 1e-15f) { + const glm::vec3 Tp(glm::dot(p[0] - q[0], Sn), glm::dot(p[0] - q[1], Sn), + glm::dot(p[0] - q[2], Sn)); + + int index = -1; + if ((Tp[0] > 0.0f) && (Tp[1] > 0.0f) && (Tp[2] > 0.0f)) { + index = Tp[0] < Tp[1] ? 0 : 1; + if (Tp[2] < Tp[index]) index = 2; + } else if ((Tp[0] < 0.0f) && (Tp[1] < 0.0f) && (Tp[2] < 0.0f)) { + index = Tp[0] > Tp[1] ? 0 : 1; + if (Tp[2] > Tp[index]) index = 2; + } + + if (index >= 0) { + shown_disjoint = true; + + const glm::vec3& qIndex = q[index]; + + glm::vec3 V = qIndex - p[0]; + glm::vec3 Z = glm::cross(Sn, Sv[0]); + if (glm::dot(V, Z) > 0.0f) { + V = qIndex - p[1]; + Z = glm::cross(Sn, Sv[1]); + if (glm::dot(V, Z) > 0.0f) { + V = qIndex - p[2]; + Z = glm::cross(Sn, Sv[2]); + if (glm::dot(V, Z) > 0.0f) { + glm::vec3 cp = qIndex + Sn * Tp[index] / Snl; + glm::vec3 cq = qIndex; + return glm::dot(cp - cq, cp - cq); + } + } + } + } + } + + glm::vec3 Tn = glm::cross(Tv[0], Tv[1]); + float Tnl = glm::dot(Tn, Tn); + + if (Tnl > 1e-15f) { + const glm::vec3 Sp(glm::dot(q[0] - p[0], Tn), glm::dot(q[0] - p[1], Tn), + glm::dot(q[0] - p[2], Tn)); + + int index = -1; + if ((Sp[0] > 0.0f) && (Sp[1] > 0.0f) && (Sp[2] > 0.0f)) { + index = Sp[0] < Sp[1] ? 0 : 1; + if (Sp[2] < Sp[index]) index = 2; + } else if ((Sp[0] < 0.0f) && (Sp[1] < 0.0f) && (Sp[2] < 0.0f)) { + index = Sp[0] > Sp[1] ? 0 : 1; + if (Sp[2] > Sp[index]) index = 2; + } + + if (index >= 0) { + shown_disjoint = true; + + const glm::vec3& pIndex = p[index]; + + glm::vec3 V = pIndex - q[0]; + glm::vec3 Z = glm::cross(Tn, Tv[0]); + if (glm::dot(V, Z) > 0.0f) { + V = pIndex - q[1]; + Z = glm::cross(Tn, Tv[1]); + if (glm::dot(V, Z) > 0.0f) { + V = pIndex - q[2]; + Z = glm::cross(Tn, Tv[2]); + if (glm::dot(V, Z) > 0.0f) { + glm::vec3 cp = pIndex; + glm::vec3 cq = pIndex + Tn * Sp[index] / Tnl; + return glm::dot(cp - cq, cp - cq); + } + } + } + } + } + + return shown_disjoint ? mindd : 0.0f; +}; +} // namespace manifold diff --git a/thirdparty/manifold/src/utilities/include/utils.h b/thirdparty/manifold/src/utilities/include/utils.h new file mode 100644 index 000000000000..1c13cd0d367a --- /dev/null +++ b/thirdparty/manifold/src/utilities/include/utils.h @@ -0,0 +1,242 @@ +// Copyright 2020 The Manifold Authors, Jared Hoberock and Nathan Bell of +// NVIDIA Research +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include + +#ifdef MANIFOLD_DEBUG +#include +#include +#endif + +#include "par.h" +#include "vec.h" + +#if __has_include() +#include +#else +#define FrameMarkStart(x) +#define FrameMarkEnd(x) +// putting ZoneScoped in a function will instrument the function execution when +// TRACY_ENABLE is set, which allows the profiler to record more accurate +// timing. +#define ZoneScoped +#define ZoneScopedN(name) +#endif + +namespace manifold { + +/** @defgroup Private + * @brief Internal classes of the library; not currently part of the public API + * @{ + */ +#ifdef MANIFOLD_DEBUG +struct Timer { + std::chrono::high_resolution_clock::time_point start, end; + + void Start() { start = std::chrono::high_resolution_clock::now(); } + + void Stop() { end = std::chrono::high_resolution_clock::now(); } + + float Elapsed() { + return std::chrono::duration_cast(end - start) + .count(); + } + void Print(std::string message) { + std::cout << "----------- " << std::round(Elapsed()) << " ms for " + << message << std::endl; + } +}; +#endif + +template +thrust::zip_iterator> zip(Iters... iters) { + return thrust::make_zip_iterator(thrust::make_tuple(iters...)); +} + +template +thrust::permutation_iterator perm(A a, B b) { + return thrust::make_permutation_iterator(a, b); +} + +template +thrust::counting_iterator countAt(T i) { + return thrust::make_counting_iterator(i); +} + +inline int Next3(int i) { + constexpr glm::ivec3 next3(1, 2, 0); + return next3[i]; +} + +inline int Prev3(int i) { + constexpr glm::ivec3 prev3(2, 0, 1); + return prev3[i]; +} + +template +T AtomicAdd(T& target, T add) { + std::atomic& tar = reinterpret_cast&>(target); + T old_val = tar.load(); + while (!tar.compare_exchange_weak(old_val, old_val + add, + std::memory_order_seq_cst)) + ; + return old_val; +} + +template <> +inline int AtomicAdd(int& target, int add) { + std::atomic& tar = reinterpret_cast&>(target); + int old_val = tar.fetch_add(add, std::memory_order_seq_cst); + return old_val; +} + +// Copied from +// https://github.com/thrust/thrust/blob/master/examples/strided_range.cu +template +class strided_range { + public: + typedef typename thrust::iterator_difference::type difference_type; + + struct stride_functor + : public thrust::unary_function { + difference_type stride; + + stride_functor(difference_type stride) : stride(stride) {} + + difference_type operator()(const difference_type& i) const { + return stride * i; + } + }; + + typedef typename thrust::counting_iterator CountingIterator; + typedef typename thrust::transform_iterator + TransformIterator; + typedef typename thrust::permutation_iterator + PermutationIterator; + + // type of the strided_range iterator + typedef PermutationIterator iterator; + + // construct strided_range for the range [first,last) + strided_range(Iterator first, Iterator last, difference_type stride) + : first(first), last(last), stride(stride) {} + strided_range() {} + + iterator begin(void) const { + return PermutationIterator( + first, TransformIterator(CountingIterator(0), stride_functor(stride))); + } + + iterator end(void) const { + 
+    return begin() + ((last - first) + (stride - 1)) / stride;
+  }
+
+ protected:
+  Iterator first;
+  Iterator last;
+  difference_type stride;
+};
+
+template <typename T>
+class ConcurrentSharedPtr {
+ public:
+  ConcurrentSharedPtr(T value) : impl(std::make_shared<T>(value)) {}
+  ConcurrentSharedPtr(const ConcurrentSharedPtr<T>& other)
+      : impl(other.impl), mutex(other.mutex) {}
+  class SharedPtrGuard {
+   public:
+    SharedPtrGuard(std::recursive_mutex* mutex, T* content)
+        : mutex(mutex), content(content) {
+      mutex->lock();
+    }
+    ~SharedPtrGuard() { mutex->unlock(); }
+
+    T& operator*() { return *content; }
+    T* operator->() { return content; }
+
+   private:
+    std::recursive_mutex* mutex;
+    T* content;
+  };
+  SharedPtrGuard GetGuard() { return SharedPtrGuard(mutex.get(), impl.get()); }
+  unsigned int UseCount() { return impl.use_count(); }
+
+ private:
+  std::shared_ptr<T> impl;
+  std::shared_ptr<std::recursive_mutex> mutex =
+      std::make_shared<std::recursive_mutex>();
+};
+
+template <typename I = int, typename R = unsigned char>
+struct UnionFind {
+  Vec<I> parents;
+  // we do union by rank
+  // note that rank is shifted by 1: rank 0 means the node is not connected to
+  // anything else
+  Vec<R> ranks;
+
+  UnionFind(I numNodes) : parents(numNodes), ranks(numNodes, 0) {
+    sequence(autoPolicy(numNodes), parents.begin(), parents.end());
+  }
+
+  I find(I x) {
+    while (parents[x] != x) {
+      // path halving: point x at its grandparent as we walk up
+      parents[x] = parents[parents[x]];
+      x = parents[x];
+    }
+    return x;
+  }
+
+  void unionXY(I x, I y) {
+    if (x == y) return;
+    if (ranks[x] == 0) ranks[x] = 1;
+    if (ranks[y] == 0) ranks[y] = 1;
+    x = find(x);
+    y = find(y);
+    if (x == y) return;
+    if (ranks[x] < ranks[y]) std::swap(x, y);
+    if (ranks[x] == ranks[y]) ranks[x]++;
+    parents[y] = x;
+  }
+
+  I connectedComponents(std::vector<I>& components) {
+    components.resize(parents.size());
+    I lonelyNodes = 0;
+    std::unordered_map<I, I> toLabel;
+    for (size_t i = 0; i < parents.size(); ++i) {
+      // we optimize for connected components of size 1:
+      // no need to put them into the hashmap
+      if (ranks[i] == 0) {
+        components[i] = static_cast<I>(toLabel.size()) + lonelyNodes++;
+        continue;
+      }
+      parents[i] = find(i);
+      auto iter = toLabel.find(parents[i]);
+      if (iter == toLabel.end()) {
+        I s = static_cast<I>(toLabel.size()) + lonelyNodes;
+        toLabel.insert(std::make_pair(parents[i], s));
+        components[i] = s;
+      } else {
+        components[i] = iter->second;
+      }
+    }
+    return toLabel.size() + lonelyNodes;
+  }
+};
+/** @} */
+} // namespace manifold
diff --git a/thirdparty/manifold/src/utilities/include/vec.h b/thirdparty/manifold/src/utilities/include/vec.h
new file mode 100644
index 000000000000..5409843d94a9
--- /dev/null
+++ b/thirdparty/manifold/src/utilities/include/vec.h
@@ -0,0 +1,244 @@
+// Copyright 2021 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
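+
+// Illustrative note (not part of upstream Manifold): unlike std::vector, the
+// sized constructor of Vec below leaves its memory uninitialized, e.g.
+//   manifold::Vec<int> a(8);     // 8 uninitialized ints
+//   manifold::Vec<int> b(8, 0);  // 8 zero-initialized ints
+//   b.resize(16, -1);            // growth slots are filled with -1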
+
+#pragma once
+#include <limits>
+#include <stdexcept>
+#include <type_traits>
+#include <vector>
+#if TRACY_ENABLE && TRACY_MEMORY_USAGE
+#include "tracy/Tracy.hpp"
+#else
+#define TracyAllocS(ptr, size, n) (void)0
+#define TracyFreeS(ptr, n) (void)0
+#endif
+
+#include "par.h"
+#include "public.h"
+#include "vec_view.h"
+
+namespace manifold {
+
+/** @addtogroup Private
+ *  @{
+ */
+template <typename T>
+class Vec;
+
+/*
+ * Specialized vector implementation with multithreaded fill and uninitialized
+ * memory optimizations.
+ * Note that the constructor and resize function will not perform
+ * initialization if the parameter val is not set. Also, this is a simplified
+ * implementation that does not handle non-trivial constructors/destructors,
+ * so T must be kept trivial.
+ */
+template <typename T>
+class Vec : public VecView<T> {
+ public:
+  Vec() {}
+
+  // Note that the vector constructed with this constructor will contain
+  // uninitialized memory. Please specify `val` if you need to make sure that
+  // the data is initialized.
+  Vec(size_t size) {
+    reserve(size);
+    this->size_ = size;
+  }
+
+  Vec(size_t size, T val) { resize(size, val); }
+
+  Vec(const Vec<T> &vec) {
+    this->size_ = vec.size();
+    this->capacity_ = this->size_;
+    auto policy = autoPolicy(this->size_);
+    if (this->size_ != 0) {
+      this->ptr_ = reinterpret_cast<T *>(malloc(this->size_ * sizeof(T)));
+      if (this->ptr_ == nullptr) throw std::bad_alloc();
+      TracyAllocS(this->ptr_, this->size_ * sizeof(T), 3);
+      uninitialized_copy(policy, vec.begin(), vec.end(), this->ptr_);
+    }
+  }
+
+  Vec(const std::vector<T> &vec) {
+    this->size_ = vec.size();
+    this->capacity_ = this->size_;
+    auto policy = autoPolicy(this->size_);
+    if (this->size_ != 0) {
+      this->ptr_ = reinterpret_cast<T *>(malloc(this->size_ * sizeof(T)));
+      if (this->ptr_ == nullptr) throw std::bad_alloc();
+      TracyAllocS(this->ptr_, this->size_ * sizeof(T), 3);
+      uninitialized_copy(policy, vec.begin(), vec.end(), this->ptr_);
+    }
+  }
+
+  Vec(Vec<T> &&vec) {
+    this->ptr_ = vec.ptr_;
+    this->size_ = vec.size_;
+    capacity_ = vec.capacity_;
+    vec.ptr_ = nullptr;
+    vec.size_ = 0;
+    vec.capacity_ = 0;
+  }
+
+  operator VecView<T>() { return {this->ptr_, this->size_}; }
+  operator VecView<const T>() const { return {this->ptr_, this->size_}; }
+
+  ~Vec() {
+    if (this->ptr_ != nullptr) {
+      TracyFreeS(this->ptr_, 3);
+      free(this->ptr_);
+    }
+    this->ptr_ = nullptr;
+    this->size_ = 0;
+    capacity_ = 0;
+  }
+
+  Vec<T> &operator=(const Vec<T> &other) {
+    if (&other == this) return *this;
+    if (this->ptr_ != nullptr) {
+      TracyFreeS(this->ptr_, 3);
+      free(this->ptr_);
+    }
+    this->size_ = other.size_;
+    capacity_ = other.size_;
+    auto policy = autoPolicy(this->size_);
+    if (this->size_ != 0) {
+      this->ptr_ = reinterpret_cast<T *>(malloc(this->size_ * sizeof(T)));
+      if (this->ptr_ == nullptr) throw std::bad_alloc();
+      TracyAllocS(this->ptr_, this->size_ * sizeof(T), 3);
+      uninitialized_copy(policy, other.begin(), other.end(), this->ptr_);
+    }
+    return *this;
+  }
+
+  Vec<T> &operator=(Vec<T> &&other) {
+    if (&other == this) return *this;
+    if (this->ptr_ != nullptr) {
+      TracyFreeS(this->ptr_, 3);
+      free(this->ptr_);
+    }
+    this->size_ = other.size_;
+    capacity_ = other.capacity_;
+    this->ptr_ = other.ptr_;
+    other.ptr_ = nullptr;
+    other.size_ = 0;
+    other.capacity_ = 0;
+    return *this;
+  }
+
+  void swap(Vec<T> &other) {
+    std::swap(this->ptr_, other.ptr_);
+    std::swap(this->size_, other.size_);
+    std::swap(capacity_, other.capacity_);
+  }
+
+  inline void push_back(const T &val) {
+    if (this->size_ >= capacity_) {
+      // avoid a dangling pointer in case val is a reference into our own array
+      T val_copy = val;
+      reserve(capacity_ == 0 ? 128 : capacity_ * 2);
+      this->ptr_[this->size_++] = val_copy;
+      return;
+    }
+    this->ptr_[this->size_++] = val;
+  }
+
+  void reserve(size_t n) {
+    if (n > capacity_) {
+      T *newBuffer = reinterpret_cast<T *>(malloc(n * sizeof(T)));
+      if (newBuffer == nullptr) throw std::bad_alloc();
+      TracyAllocS(newBuffer, n * sizeof(T), 3);
+      if (this->size_ > 0)
+        uninitialized_copy(autoPolicy(this->size_), this->ptr_,
+                           this->ptr_ + this->size_, newBuffer);
+      if (this->ptr_ != nullptr) {
+        TracyFreeS(this->ptr_, 3);
+        free(this->ptr_);
+      }
+      this->ptr_ = newBuffer;
+      capacity_ = n;
+    }
+  }
+
+  void resize(size_t newSize, T val = T()) {
+    bool shrink = this->size_ > 2 * newSize;
+    reserve(newSize);
+    if (this->size_ < newSize) {
+      uninitialized_fill(autoPolicy(newSize - this->size_),
+                         this->ptr_ + this->size_, this->ptr_ + newSize, val);
+    }
+    this->size_ = newSize;
+    if (shrink) shrink_to_fit();
+  }
+
+  void shrink_to_fit() {
+    T *newBuffer = nullptr;
+    if (this->size_ > 0) {
+      newBuffer = reinterpret_cast<T *>(malloc(this->size_ * sizeof(T)));
+      if (newBuffer == nullptr) throw std::bad_alloc();
+      TracyAllocS(newBuffer, this->size_ * sizeof(T), 3);
+      uninitialized_copy(autoPolicy(this->size_), this->ptr_,
+                         this->ptr_ + this->size_, newBuffer);
+    }
+    if (this->ptr_ != nullptr) {
+      TracyFreeS(this->ptr_, 3);
+      free(this->ptr_);
+    }
+    this->ptr_ = newBuffer;
+    capacity_ = this->size_;
+  }
+
+  VecView<T> view(size_t offset = 0,
+                  size_t length = std::numeric_limits<size_t>::max()) {
+    if (length == std::numeric_limits<size_t>::max()) {
+      if (offset > this->size_) throw std::out_of_range("Vec::view out of range");
+      length = this->size_ - offset;
+    } else if (offset + length > this->size_) {
+      throw std::out_of_range("Vec::view out of range");
+    }
+    return VecView<T>(this->ptr_ + offset, length);
+  }
+
+  VecView<const T> cview(
+      size_t offset = 0,
+      size_t length = std::numeric_limits<size_t>::max()) const {
+    if (length == std::numeric_limits<size_t>::max()) {
+      if (offset > this->size_)
+        throw std::out_of_range("Vec::cview out of range");
+      length = this->size_ - offset;
+    } else if (offset + length > this->size_) {
+      throw std::out_of_range("Vec::cview out of range");
+    }
+    return VecView<const T>(this->ptr_ + offset, length);
+  }
+
+  VecView<const T> view(
+      size_t offset = 0,
+      size_t length = std::numeric_limits<size_t>::max()) const {
+    return cview(offset, length);
+  }
+
+  T *data() { return this->ptr_; }
+  const T *data() const { return this->ptr_; }
+
+ private:
+  size_t capacity_ = 0;
+
+  static_assert(std::is_trivially_destructible<T>::value);
+};
+/** @} */
+} // namespace manifold
diff --git a/thirdparty/manifold/src/utilities/include/vec_view.h b/thirdparty/manifold/src/utilities/include/vec_view.h
new file mode 100644
index 000000000000..5945f20c8106
--- /dev/null
+++ b/thirdparty/manifold/src/utilities/include/vec_view.h
@@ -0,0 +1,116 @@
+// Copyright 2023 The Manifold Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <cstddef>
+#include <stdexcept>
+
+namespace manifold {
+
+/**
+ * View for Vec, which can perform offset operations.
+ * This will be invalidated when the original vector is dropped or changes
+ * length. Roughly equivalent to std::span<T> from C++20.
+ */
+template <typename T>
+class VecView {
+ public:
+  using Iter = T *;
+  using IterC = const T *;
+
+  VecView(T *ptr, size_t size) : ptr_(ptr), size_(size) {}
+
+  VecView(const VecView &other) {
+    ptr_ = other.ptr_;
+    size_ = other.size_;
+  }
+
+  VecView &operator=(const VecView &other) {
+    ptr_ = other.ptr_;
+    size_ = other.size_;
+    return *this;
+  }
+
+  // allows conversion to a const VecView
+  operator VecView<const T>() const { return {ptr_, size_}; }
+
+  inline const T &operator[](size_t i) const {
+    if (i >= size_) {
+      throw std::out_of_range("Vec out of range");
+    }
+    return ptr_[i];
+  }
+
+  inline T &operator[](size_t i) {
+    if (i >= size_) {
+      throw std::out_of_range("Vec out of range");
+    }
+    return ptr_[i];
+  }
+
+  IterC cbegin() const { return ptr_; }
+  IterC cend() const { return ptr_ + size_; }
+
+  IterC begin() const { return cbegin(); }
+  IterC end() const { return cend(); }
+
+  Iter begin() { return ptr_; }
+  Iter end() { return ptr_ + size_; }
+
+  const T &front() const {
+    if (size_ == 0)
+      throw std::out_of_range("attempt to take the front of an empty vector");
+    return ptr_[0];
+  }
+
+  const T &back() const {
+    if (size_ == 0)
+      throw std::out_of_range("attempt to take the back of an empty vector");
+    return ptr_[size_ - 1];
+  }
+
+  T &front() {
+    if (size_ == 0)
+      throw std::out_of_range("attempt to take the front of an empty vector");
+    return ptr_[0];
+  }
+
+  T &back() {
+    if (size_ == 0)
+      throw std::out_of_range("attempt to take the back of an empty vector");
+    return ptr_[size_ - 1];
+  }
+
+  size_t size() const { return size_; }
+
+  bool empty() const { return size_ == 0; }
+
+#ifdef MANIFOLD_DEBUG
+  void Dump() const {
+    std::cout << "Vec = " << std::endl;
+    for (size_t i = 0; i < size(); ++i) {
+      std::cout << i << ", " << ptr_[i] << ", " << std::endl;
+    }
+    std::cout << std::endl;
+  }
+#endif
+
+ protected:
+  T *ptr_ = nullptr;
+  size_t size_ = 0;
+
+  VecView() = default;
+};
+
+} // namespace manifold
diff --git a/thirdparty/manifold/thirdparty/glm/.gitrepo b/thirdparty/manifold/thirdparty/glm/.gitrepo
new file mode 100644
index 000000000000..49bb6ea957e9
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/.gitrepo
@@ -0,0 +1,12 @@
+; DO NOT EDIT (unless you know what you are doing)
+;
+; This subdirectory is a git "subrepo", and this file is maintained by the
+; git-subrepo command.
See https://github.com/ingydotnet/git-subrepo#readme +; +[subrepo] + remote = https://github.com/g-truc/glm.git + branch = b06b775c1c80af51a1183c0e167f9de3b2351a79 + commit = b06b775c1c80af51a1183c0e167f9de3b2351a79 + parent = e416591e8048b25becc0a3717b106c815dcb060b + method = merge + cmdver = 0.4.6 diff --git a/thirdparty/manifold/thirdparty/glm/copying.txt b/thirdparty/manifold/thirdparty/glm/copying.txt new file mode 100644 index 000000000000..779c32fb9afe --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/copying.txt @@ -0,0 +1,54 @@ +================================================================================ +OpenGL Mathematics (GLM) +-------------------------------------------------------------------------------- +GLM is licensed under The Happy Bunny License or MIT License + +================================================================================ +The Happy Bunny License (Modified MIT License) +-------------------------------------------------------------------------------- +Copyright (c) 2005 - G-Truc Creation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Restrictions: + By making use of the Software for military purposes, you choose to make a + Bunny unhappy. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +================================================================================ +The MIT License +-------------------------------------------------------------------------------- +Copyright (c) 2005 - G-Truc Creation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/thirdparty/manifold/thirdparty/glm/glm/CMakeLists.txt b/thirdparty/manifold/thirdparty/glm/glm/CMakeLists.txt new file mode 100644 index 000000000000..178d23abdfbc --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/CMakeLists.txt @@ -0,0 +1,69 @@ +file(GLOB ROOT_SOURCE *.cpp) +file(GLOB ROOT_INLINE *.inl) +file(GLOB ROOT_HEADER *.hpp) +file(GLOB ROOT_TEXT ../*.txt) +file(GLOB ROOT_MD ../*.md) +file(GLOB ROOT_NAT ../util/glm.natvis) + +file(GLOB_RECURSE CORE_SOURCE ./detail/*.cpp) +file(GLOB_RECURSE CORE_INLINE ./detail/*.inl) +file(GLOB_RECURSE CORE_HEADER ./detail/*.hpp) + +file(GLOB_RECURSE EXT_SOURCE ./ext/*.cpp) +file(GLOB_RECURSE EXT_INLINE ./ext/*.inl) +file(GLOB_RECURSE EXT_HEADER ./ext/*.hpp) + +file(GLOB_RECURSE GTC_SOURCE ./gtc/*.cpp) +file(GLOB_RECURSE GTC_INLINE ./gtc/*.inl) +file(GLOB_RECURSE GTC_HEADER ./gtc/*.hpp) + +file(GLOB_RECURSE GTX_SOURCE ./gtx/*.cpp) +file(GLOB_RECURSE GTX_INLINE ./gtx/*.inl) +file(GLOB_RECURSE GTX_HEADER ./gtx/*.hpp) + +file(GLOB_RECURSE SIMD_SOURCE ./simd/*.cpp) +file(GLOB_RECURSE SIMD_INLINE ./simd/*.inl) +file(GLOB_RECURSE SIMD_HEADER ./simd/*.h) + +source_group("Text Files" FILES ${ROOT_TEXT} ${ROOT_MD}) +source_group("Core Files" FILES ${CORE_SOURCE}) +source_group("Core Files" FILES ${CORE_INLINE}) +source_group("Core Files" FILES ${CORE_HEADER}) +source_group("EXT Files" FILES ${EXT_SOURCE}) +source_group("EXT Files" FILES ${EXT_INLINE}) +source_group("EXT Files" FILES ${EXT_HEADER}) +source_group("GTC Files" FILES ${GTC_SOURCE}) +source_group("GTC Files" FILES ${GTC_INLINE}) +source_group("GTC Files" FILES ${GTC_HEADER}) +source_group("GTX Files" FILES ${GTX_SOURCE}) +source_group("GTX Files" FILES ${GTX_INLINE}) +source_group("GTX Files" FILES ${GTX_HEADER}) +source_group("SIMD Files" FILES ${SIMD_SOURCE}) +source_group("SIMD Files" FILES ${SIMD_INLINE}) +source_group("SIMD Files" FILES ${SIMD_HEADER}) + +add_library(glm-header-only INTERFACE) +add_library(glm::glm-header-only ALIAS glm-header-only) + +target_include_directories(glm-header-only INTERFACE + "$" + "$" +) + +if (GLM_BUILD_LIBRARY) + add_library(glm + ${ROOT_TEXT} ${ROOT_MD} ${ROOT_NAT} + ${ROOT_SOURCE} ${ROOT_INLINE} ${ROOT_HEADER} + ${CORE_SOURCE} ${CORE_INLINE} ${CORE_HEADER} + ${EXT_SOURCE} ${EXT_INLINE} ${EXT_HEADER} + ${GTC_SOURCE} ${GTC_INLINE} ${GTC_HEADER} + ${GTX_SOURCE} ${GTX_INLINE} ${GTX_HEADER} + ${SIMD_SOURCE} ${SIMD_INLINE} ${SIMD_HEADER} + ) + add_library(glm::glm ALIAS glm) + target_link_libraries(glm PUBLIC glm-header-only) +else() + add_library(glm INTERFACE) + add_library(glm::glm ALIAS glm) + target_link_libraries(glm INTERFACE glm-header-only) +endif() diff --git a/thirdparty/manifold/thirdparty/glm/glm/common.hpp b/thirdparty/manifold/thirdparty/glm/glm/common.hpp new file mode 100644 index 000000000000..b59657d549f2 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/common.hpp @@ -0,0 +1,539 @@ +/// @ref core +/// @file glm/common.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.3 Common Functions +/// +/// @defgroup core_func_common Common functions +/// @ingroup core +/// +/// Provides GLSL common functions +/// +/// These all operate component-wise. The description is per component. +/// +/// Include to use these core features. + +#pragma once + +#include "detail/qualifier.hpp" +#include "detail/_fixes.hpp" + +namespace glm +{ + /// @addtogroup core_func_common + /// @{ + + /// Returns x if x >= 0; otherwise, it returns -x. + /// + /// @tparam genType floating-point or signed integer; scalar or vector types. 
+	///
+	/// @see GLSL abs man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType abs(genType x);
+
+	/// Returns x if x >= 0; otherwise, it returns -x.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or signed integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL abs man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> abs(vec<L, T, Q> const& x);
+
+	/// Returns 1.0 if x > 0, 0.0 if x == 0, or -1.0 if x < 0.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL sign man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> sign(vec<L, T, Q> const& x);
+
+	/// Returns a value equal to the nearest integer that is less than or equal to x.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL floor man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> floor(vec<L, T, Q> const& x);
+
+	/// Returns a value equal to the nearest integer to x
+	/// whose absolute value is not larger than the absolute value of x.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL trunc man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> trunc(vec<L, T, Q> const& x);
+
+	/// Returns a value equal to the nearest integer to x.
+	/// The fraction 0.5 will round in a direction chosen by the
+	/// implementation, presumably the direction that is fastest.
+	/// This includes the possibility that round(x) returns the
+	/// same value as roundEven(x) for all values of x.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL round man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> round(vec<L, T, Q> const& x);
+
+	/// Returns a value equal to the nearest integer to x.
+	/// A fractional part of 0.5 will round toward the nearest even
+	/// integer. (Both 3.5 and 4.5 for x will return 4.0.)
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL roundEven man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	/// @see New round to even technique
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> roundEven(vec<L, T, Q> const& x);
+
+	/// Returns a value equal to the nearest integer
+	/// that is greater than or equal to x.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL ceil man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> ceil(vec<L, T, Q> const& x);
+
+	/// Return x - floor(x).
+	///
+	/// @tparam genType Floating-point scalar or vector types.
+	///
+	/// @see GLSL fract man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL genType fract(genType x);
+
+	/// Return x - floor(x).
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL fract man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> fract(vec<L, T, Q> const& x);
+
+	template<typename genType>
+	GLM_FUNC_DECL genType mod(genType x, genType y);
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> mod(vec<L, T, Q> const& x, T y);
+
+	/// Modulus. Returns x - y * floor(x / y)
+	/// for each component in x using the floating point value y.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types, include <glm/gtc/integer.hpp> for integer scalar types support
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL mod man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> mod(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+	/// Returns the fractional part of x and sets i to the integer
+	/// part (as a whole number floating point value). Both the
+	/// return value and the output parameter will have the same
+	/// sign as x.
+	///
+	/// @tparam genType Floating-point scalar or vector types.
+	///
+	/// @see GLSL modf man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL genType modf(genType x, genType& i);
+
+	/// Returns y if y < x; otherwise, it returns x.
+	///
+	/// @tparam genType Floating-point or integer; scalar or vector types.
+	///
+	/// @see GLSL min man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType min(genType x, genType y);
+
+	/// Returns y if y < x; otherwise, it returns x.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL min man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& x, T y);
+
+	/// Returns y if y < x; otherwise, it returns x.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL min man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+	/// Returns y if x < y; otherwise, it returns x.
+	///
+	/// @tparam genType Floating-point or integer; scalar or vector types.
+	///
+	/// @see GLSL max man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType max(genType x, genType y);
+
+	/// Returns y if x < y; otherwise, it returns x.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL max man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, T y);
+
+	/// Returns y if x < y; otherwise, it returns x.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL max man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+	/// Returns min(max(x, minVal), maxVal) for each component in x
+	/// using the floating-point values minVal and maxVal.
+	///
+	/// @tparam genType Floating-point or integer; scalar or vector types.
+	///
+	/// @see GLSL clamp man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal);
+
+	/// Returns min(max(x, minVal), maxVal) for each component in x
+	/// using the floating-point values minVal and maxVal.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL clamp man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> clamp(vec<L, T, Q> const& x, T minVal, T maxVal);
+
+	/// Returns min(max(x, minVal), maxVal) for each component in x
+	/// using the floating-point values minVal and maxVal.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL clamp man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> clamp(vec<L, T, Q> const& x, vec<L, T, Q> const& minVal, vec<L, T, Q> const& maxVal);
+
+	/// If genTypeU is a floating scalar or vector:
+	/// Returns x * (1.0 - a) + y * a, i.e., the linear blend of
+	/// x and y using the floating-point value a.
+	/// The value for a is not restricted to the range [0, 1].
+	///
+	/// If genTypeU is a boolean scalar or vector:
+	/// Selects which vector each returned component comes
+	/// from. For a component of 'a' that is false, the
+	/// corresponding component of 'x' is returned. For a
+	/// component of 'a' that is true, the corresponding
+	/// component of 'y' is returned. Components of 'x' and 'y' that
+	/// are not selected are allowed to be invalid floating point
+	/// values and will have no effect on the results. Thus, this
+	/// provides different functionality than
+	/// genType mix(genType x, genType y, genType(a))
+	/// where a is a Boolean vector.
+	///
+	/// @see GLSL mix man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	///
+	/// @param[in] x Value to interpolate.
+	/// @param[in] y Value to interpolate.
+	/// @param[in] a Interpolant.
+	///
+	/// @tparam genTypeT Floating point scalar or vector.
+	/// @tparam genTypeU Floating point or boolean scalar or vector. It can't be a vector unless it matches the length of genTypeT.
+	///
+	/// @code
+	/// #include <glm/glm.hpp>
+	/// ...
+	/// float a;
+	/// bool b;
+	/// glm::dvec3 e;
+	/// glm::dvec3 f;
+	/// glm::vec4 g;
+	/// glm::vec4 h;
+	/// ...
+	/// glm::vec4 r = glm::mix(g, h, a); // Interpolate two vectors with a floating-point scalar.
+	/// glm::vec4 s = glm::mix(g, h, b); // Returns g or h;
+	/// glm::dvec3 t = glm::mix(e, f, a); // The type of the third parameter is not required to match the first two.
+	/// glm::vec4 u = glm::mix(g, h, r); // Interpolations can be performed per component with a vector for the last parameter.
+	/// @endcode
+	template<typename genTypeT, typename genTypeU>
+	GLM_FUNC_DECL GLM_CONSTEXPR genTypeT mix(genTypeT x, genTypeT y, genTypeU a);
+
+	template<length_t L, typename T, typename U, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> mix(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, U, Q> const& a);
+
+	template<length_t L, typename T, typename U, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> mix(vec<L, T, Q> const& x, vec<L, T, Q> const& y, U a);
+
+	/// Returns 0.0 if x < edge, otherwise it returns 1.0 for each component of a genType.
+	///
+	/// @see GLSL step man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL genType step(genType edge, genType x);
+
+	/// Returns 0.0 if x < edge, otherwise it returns 1.0.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL step man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> step(T edge, vec<L, T, Q> const& x);
+
+	/// Returns 0.0 if x < edge, otherwise it returns 1.0.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL step man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> step(vec<L, T, Q> const& edge, vec<L, T, Q> const& x);
+
+	/// Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
+	/// performs smooth Hermite interpolation between 0 and 1
+	/// when edge0 < x < edge1. This is useful in cases where
+	/// you would want a threshold function with a smooth
+	/// transition. This is equivalent to:
+	/// genType t;
+	/// t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
+	/// return t * t * (3 - 2 * t);
+	/// Results are undefined if edge0 >= edge1.
+	///
+	/// @tparam genType Floating-point scalar or vector types.
+	///
+	/// @see GLSL smoothstep man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL genType smoothstep(genType edge0, genType edge1, genType x);
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> smoothstep(T edge0, T edge1, vec<L, T, Q> const& x);
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> smoothstep(vec<L, T, Q> const& edge0, vec<L, T, Q> const& edge1, vec<L, T, Q> const& x);
+
+	/// Returns true if x holds a NaN (not a number)
+	/// representation in the underlying implementation's set of
+	/// floating point representations. Returns false otherwise,
+	/// including for implementations with no NaN
+	/// representations.
+	///
+	/// /!\ When using compiler fast math, this function may fail.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL isnan man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, bool, Q> isnan(vec<L, T, Q> const& x);
+
+	/// Returns true if x holds a positive infinity or negative
+	/// infinity representation in the underlying implementation's
+	/// set of floating point representations. Returns false
+	/// otherwise, including for implementations with no infinity
+	/// representations.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL isinf man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, bool, Q> isinf(vec<L, T, Q> const& x);
+
+	/// Returns a signed integer value representing
+	/// the encoding of a floating-point value. The floating-point
+	/// value's bit-level representation is preserved.
+	///
+	/// @see GLSL floatBitsToInt man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	GLM_FUNC_DECL int floatBitsToInt(float v);
+
+	/// Returns a signed integer value representing
+	/// the encoding of a floating-point value. The floating-point
+	/// value's bit-level representation is preserved.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL floatBitsToInt man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL vec<L, int, Q> floatBitsToInt(vec<L, float, Q> const& v);
+
+	/// Returns an unsigned integer value representing
+	/// the encoding of a floating-point value. The floating-point
+	/// value's bit-level representation is preserved.
+	///
+	/// @see GLSL floatBitsToUint man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	GLM_FUNC_DECL uint floatBitsToUint(float v);
+
+	/// Returns an unsigned integer value representing
+	/// the encoding of a floating-point value. The floating-point
+	/// value's bit-level representation is preserved.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL floatBitsToUint man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL vec<L, uint, Q> floatBitsToUint(vec<L, float, Q> const& v);
+
+	/// Returns a floating-point value corresponding to a signed
+	/// integer encoding of a floating-point value.
+	/// If an inf or NaN is passed in, it will not signal, and the
+	/// resulting floating point value is unspecified. Otherwise,
+	/// the bit-level representation is preserved.
+	///
+	/// @see GLSL intBitsToFloat man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	GLM_FUNC_DECL float intBitsToFloat(int v);
+
+	/// Returns a floating-point value corresponding to a signed
+	/// integer encoding of a floating-point value.
+	/// If an inf or NaN is passed in, it will not signal, and the
+	/// resulting floating point value is unspecified. Otherwise,
+	/// the bit-level representation is preserved.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL intBitsToFloat man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL vec<L, float, Q> intBitsToFloat(vec<L, int, Q> const& v);
+
+	/// Returns a floating-point value corresponding to an
+	/// unsigned integer encoding of a floating-point value.
+	/// If an inf or NaN is passed in, it will not signal, and the
+	/// resulting floating point value is unspecified. Otherwise,
+	/// the bit-level representation is preserved.
+	///
+	/// @see GLSL uintBitsToFloat man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	GLM_FUNC_DECL float uintBitsToFloat(uint v);
+
+	/// Returns a floating-point value corresponding to an
+	/// unsigned integer encoding of a floating-point value.
+	/// If an inf or NaN is passed in, it will not signal, and the
+	/// resulting floating point value is unspecified. Otherwise,
+	/// the bit-level representation is preserved.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see GLSL uintBitsToFloat man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL vec<L, float, Q> uintBitsToFloat(vec<L, uint, Q> const& v);
+
+	/// Computes and returns a * b + c.
+	///
+	/// @tparam genType Floating-point scalar or vector types.
+	///
+	/// @see GLSL fma man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL genType fma(genType const& a, genType const& b, genType const& c);
+
+	/// Splits x into a floating-point significand in the range
+	/// [0.5, 1.0) and an integral exponent of two, such that:
+	/// x = significand * exp(2, exponent)
+	///
+	/// The significand is returned by the function and the
+	/// exponent is returned in the parameter exp. For a
+	/// floating-point value of zero, the significand and exponent
+	/// are both zero. For a floating-point value that is an
+	/// infinity or is not a number, the results are undefined.
+	///
+	/// @tparam genType Floating-point scalar or vector types.
+	///
+	/// @see GLSL frexp man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL genType frexp(genType x, int& exp);
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> frexp(vec<L, T, Q> const& v, vec<L, int, Q>& exp);
+
+	/// Builds a floating-point number from x and the
+	/// corresponding integral exponent of two in exp, returning:
+	/// significand * exp(2, exponent)
+	///
+	/// If this product is too large to be represented in the
+	/// floating-point type, the result is undefined.
+	///
+	/// @tparam genType Floating-point scalar or vector types.
+	///
+	/// @see GLSL ldexp man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL genType ldexp(genType const& x, int const& exp);
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> ldexp(vec<L, T, Q> const& v, vec<L, int, Q> const& exp);
+
+	/// @}
+}//namespace glm
+
+#include "detail/func_common.inl"
+
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/_features.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/_features.hpp
new file mode 100644
index 000000000000..b0cbe9ff02cf
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/_features.hpp
@@ -0,0 +1,394 @@
+#pragma once
+
+// #define GLM_CXX98_EXCEPTIONS
+// #define GLM_CXX98_RTTI
+
+// #define GLM_CXX11_RVALUE_REFERENCES
+// Rvalue references - GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n2118.html
+
+// GLM_CXX11_TRAILING_RETURN
+// Rvalue references for *this - GCC not supported
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2439.htm
+
+// GLM_CXX11_NONSTATIC_MEMBER_INIT
+// Initialization of class objects by rvalues - GCC any
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1610.html
+
+// GLM_CXX11_NONSTATIC_MEMBER_INIT
+// Non-static data member initializers - GCC 4.7
+// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2008/n2756.htm
+
+// #define GLM_CXX11_VARIADIC_TEMPLATE
+// Variadic templates - GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2242.pdf
+
+//
+// Extending variadic template template parameters - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2555.pdf
+
+// #define GLM_CXX11_GENERALIZED_INITIALIZERS
+// Initializer lists - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm
+
+// #define GLM_CXX11_STATIC_ASSERT
+// Static assertions - GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1720.html
+
+// #define GLM_CXX11_AUTO_TYPE
+// auto-typed variables - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1984.pdf
+
+// #define GLM_CXX11_AUTO_TYPE
+// Multi-declarator auto - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1737.pdf
+
+// #define GLM_CXX11_AUTO_TYPE
+// Removal of auto as a storage-class specifier - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2546.htm
+
+// #define GLM_CXX11_AUTO_TYPE
+// New function declarator syntax - GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2541.htm
+
+// #define GLM_CXX11_LAMBDAS
+// New wording for C++0x lambdas - GCC 4.5
+// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2927.pdf
+
+// #define GLM_CXX11_DECLTYPE
+// Declared type of an expression - GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2343.pdf
+
+//
+// Right angle brackets - GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1757.html
+
+//
+// Default template arguments for function templates DR226 GCC 4.3
+// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#226
+
+//
+// Solving the SFINAE problem for expressions DR339 GCC 4.4
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2634.html
+
+// #define GLM_CXX11_ALIAS_TEMPLATE
+// Template aliases N2258 GCC 4.7
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf
+
+//
+// Extern templates N1987 Yes
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1987.htm
+
+// #define GLM_CXX11_NULLPTR
+// Null pointer constant N2431 GCC 4.6
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2431.pdf
+
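+// Illustrative example (not part of upstream GLM): headers elsewhere key off
+// these feature macros to pick a C++11 or C++98 spelling of a construct, e.g.
+//
+//   #ifdef GLM_CXX11_STATIC_ASSERT
+//   #	define MY_STATIC_ASSERT(x) static_assert(x, #x)
+//   #else
+//   #	define MY_STATIC_ASSERT(x) typedef char my_assert_failed[(x) ? 1 : -1]
+//   #endif
+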
+// #define GLM_CXX11_STRONG_ENUMS +// Strongly-typed enums N2347 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2347.pdf + +// +// Forward declarations for enums N2764 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2764.pdf + +// +// Generalized attributes N2761 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2761.pdf + +// +// Generalized constant expressions N2235 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf + +// +// Alignment support N2341 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf + +// #define GLM_CXX11_DELEGATING_CONSTRUCTORS +// Delegating constructors N1986 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1986.pdf + +// +// Inheriting constructors N2540 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2540.htm + +// #define GLM_CXX11_EXPLICIT_CONVERSIONS +// Explicit conversion operators N2437 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf + +// +// New character types N2249 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2249.html + +// +// Unicode string literals N2442 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm + +// +// Raw string literals N2442 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm + +// +// Universal character name literals N2170 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2170.html + +// #define GLM_CXX11_USER_LITERALS +// User-defined literals N2765 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2765.pdf + +// +// Standard Layout Types N2342 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2342.htm + +// #define GLM_CXX11_DEFAULTED_FUNCTIONS +// #define GLM_CXX11_DELETED_FUNCTIONS +// Defaulted and deleted functions N2346 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2346.htm + +// +// Extended friend declarations N1791 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1791.pdf + +// +// Extending sizeof N2253 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2253.html + +// #define GLM_CXX11_INLINE_NAMESPACES +// Inline namespaces N2535 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2535.htm + +// #define GLM_CXX11_UNRESTRICTED_UNIONS +// Unrestricted unions N2544 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf + +// #define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS +// Local and unnamed types as template arguments N2657 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm + +// #define GLM_CXX11_RANGE_FOR +// Range-based for N2930 GCC 4.6 +// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2930.html + +// #define GLM_CXX11_OVERRIDE_CONTROL +// Explicit virtual overrides N2928 N3206 N3272 GCC 4.7 +// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2928.htm +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3206.htm +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3272.htm + +// +// Minimal support for garbage collection and reachability-based leak detection N2670 No +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2670.htm + +// #define GLM_CXX11_NOEXCEPT +// Allowing move constructors to throw [noexcept] N3050 GCC 4.6 (core language only) +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3050.html + +// +// Defining move 
special member functions N3053 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3053.html + +// +// Sequence points N2239 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html + +// +// Atomic operations N2427 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html + +// +// Strong Compare and Exchange N2748 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html + +// +// Bidirectional Fences N2752 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2752.htm + +// +// Memory model N2429 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2429.htm + +// +// Data-dependency ordering: atomics and memory model N2664 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm + +// +// Propagating exceptions N2179 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2179.html + +// +// Abandoning a process and at_quick_exit N2440 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2440.htm + +// +// Allow atomics use in signal handlers N2547 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2547.htm + +// +// Thread-local storage N2659 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2659.htm + +// +// Dynamic initialization and destruction with concurrency N2660 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2660.htm + +// +// __func__ predefined identifier N2340 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2340.htm + +// +// C99 preprocessor N1653 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1653.htm + +// +// long long N1811 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1811.pdf + +// +// Extended integral types N1988 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1988.pdf + +#if(GLM_COMPILER & GLM_COMPILER_GCC) + +# define GLM_CXX11_STATIC_ASSERT + +#elif(GLM_COMPILER & GLM_COMPILER_CLANG) +# if(__has_feature(cxx_exceptions)) +# define GLM_CXX98_EXCEPTIONS +# endif + +# if(__has_feature(cxx_rtti)) +# define GLM_CXX98_RTTI +# endif + +# if(__has_feature(cxx_access_control_sfinae)) +# define GLM_CXX11_ACCESS_CONTROL_SFINAE +# endif + +# if(__has_feature(cxx_alias_templates)) +# define GLM_CXX11_ALIAS_TEMPLATE +# endif + +# if(__has_feature(cxx_alignas)) +# define GLM_CXX11_ALIGNAS +# endif + +# if(__has_feature(cxx_attributes)) +# define GLM_CXX11_ATTRIBUTES +# endif + +# if(__has_feature(cxx_constexpr)) +# define GLM_CXX11_CONSTEXPR +# endif + +# if(__has_feature(cxx_decltype)) +# define GLM_CXX11_DECLTYPE +# endif + +# if(__has_feature(cxx_default_function_template_args)) +# define GLM_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS +# endif + +# if(__has_feature(cxx_defaulted_functions)) +# define GLM_CXX11_DEFAULTED_FUNCTIONS +# endif + +# if(__has_feature(cxx_delegating_constructors)) +# define GLM_CXX11_DELEGATING_CONSTRUCTORS +# endif + +# if(__has_feature(cxx_deleted_functions)) +# define GLM_CXX11_DELETED_FUNCTIONS +# endif + +# if(__has_feature(cxx_explicit_conversions)) +# define GLM_CXX11_EXPLICIT_CONVERSIONS +# endif + +# if(__has_feature(cxx_generalized_initializers)) +# define GLM_CXX11_GENERALIZED_INITIALIZERS +# endif + +# if(__has_feature(cxx_implicit_moves)) +# define GLM_CXX11_IMPLICIT_MOVES +# endif + +# if(__has_feature(cxx_inheriting_constructors)) +# define GLM_CXX11_INHERITING_CONSTRUCTORS +# endif + +# if(__has_feature(cxx_inline_namespaces)) +# define 
GLM_CXX11_INLINE_NAMESPACES +# endif + +# if(__has_feature(cxx_lambdas)) +# define GLM_CXX11_LAMBDAS +# endif + +# if(__has_feature(cxx_local_type_template_args)) +# define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS +# endif + +# if(__has_feature(cxx_noexcept)) +# define GLM_CXX11_NOEXCEPT +# endif + +# if(__has_feature(cxx_nonstatic_member_init)) +# define GLM_CXX11_NONSTATIC_MEMBER_INIT +# endif + +# if(__has_feature(cxx_nullptr)) +# define GLM_CXX11_NULLPTR +# endif + +# if(__has_feature(cxx_override_control)) +# define GLM_CXX11_OVERRIDE_CONTROL +# endif + +# if(__has_feature(cxx_reference_qualified_functions)) +# define GLM_CXX11_REFERENCE_QUALIFIED_FUNCTIONS +# endif + +# if(__has_feature(cxx_range_for)) +# define GLM_CXX11_RANGE_FOR +# endif + +# if(__has_feature(cxx_raw_string_literals)) +# define GLM_CXX11_RAW_STRING_LITERALS +# endif + +# if(__has_feature(cxx_rvalue_references)) +# define GLM_CXX11_RVALUE_REFERENCES +# endif + +# if(__has_feature(cxx_static_assert)) +# define GLM_CXX11_STATIC_ASSERT +# endif + +# if(__has_feature(cxx_auto_type)) +# define GLM_CXX11_AUTO_TYPE +# endif + +# if(__has_feature(cxx_strong_enums)) +# define GLM_CXX11_STRONG_ENUMS +# endif + +# if(__has_feature(cxx_trailing_return)) +# define GLM_CXX11_TRAILING_RETURN +# endif + +# if(__has_feature(cxx_unicode_literals)) +# define GLM_CXX11_UNICODE_LITERALS +# endif + +# if(__has_feature(cxx_unrestricted_unions)) +# define GLM_CXX11_UNRESTRICTED_UNIONS +# endif + +# if(__has_feature(cxx_user_literals)) +# define GLM_CXX11_USER_LITERALS +# endif + +# if(__has_feature(cxx_variadic_templates)) +# define GLM_CXX11_VARIADIC_TEMPLATES +# endif + +#endif//(GLM_COMPILER & GLM_COMPILER_CLANG) diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/_fixes.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/_fixes.hpp new file mode 100644 index 000000000000..a503c7c0d041 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/_fixes.hpp @@ -0,0 +1,27 @@ +#include + +//! Workaround for compatibility with other libraries +#ifdef max +#undef max +#endif + +//! Workaround for compatibility with other libraries +#ifdef min +#undef min +#endif + +//! Workaround for Android +#ifdef isnan +#undef isnan +#endif + +//! Workaround for Android +#ifdef isinf +#undef isinf +#endif + +//! 
Workaround for Chrome Native Client
+#ifdef log2
+#undef log2
+#endif
+
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/_noise.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/_noise.hpp
new file mode 100644
index 000000000000..5a874a02221f
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/_noise.hpp
@@ -0,0 +1,81 @@
+#pragma once
+
+#include "../common.hpp"
+
+namespace glm{
+namespace detail
+{
+	template<typename T>
+	GLM_FUNC_QUALIFIER T mod289(T const& x)
+	{
+		return x - floor(x * (static_cast<T>(1.0) / static_cast<T>(289.0))) * static_cast<T>(289.0);
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER T permute(T const& x)
+	{
+		return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<2, T, Q> permute(vec<2, T, Q> const& x)
+	{
+		return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, T, Q> permute(vec<3, T, Q> const& x)
+	{
+		return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, T, Q> permute(vec<4, T, Q> const& x)
+	{
+		return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER T taylorInvSqrt(T const& r)
+	{
+		return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<2, T, Q> taylorInvSqrt(vec<2, T, Q> const& r)
+	{
+		return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, T, Q> taylorInvSqrt(vec<3, T, Q> const& r)
+	{
+		return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, T, Q> taylorInvSqrt(vec<4, T, Q> const& r)
+	{
+		return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<2, T, Q> fade(vec<2, T, Q> const& t)
+	{
+		return (t * t * t) * (t * (t * static_cast<T>(6) - static_cast<T>(15)) + static_cast<T>(10));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, T, Q> fade(vec<3, T, Q> const& t)
+	{
+		return (t * t * t) * (t * (t * static_cast<T>(6) - static_cast<T>(15)) + static_cast<T>(10));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, T, Q> fade(vec<4, T, Q> const& t)
+	{
+		return (t * t * t) * (t * (t * static_cast<T>(6) - static_cast<T>(15)) + static_cast<T>(10));
+	}
+}//namespace detail
+}//namespace glm
+
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/_swizzle.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/_swizzle.hpp
new file mode 100644
index 000000000000..87896ef4f6f2
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/_swizzle.hpp
@@ -0,0 +1,804 @@
+#pragma once
+
+namespace glm{
+namespace detail
+{
+	// Internal class for implementing swizzle operators
+	template<typename T, int N>
+	struct _swizzle_base0
+	{
+	protected:
+		GLM_FUNC_QUALIFIER T& elem(size_t i){ return (reinterpret_cast<T*>(_buffer))[i]; }
+		GLM_FUNC_QUALIFIER T const& elem(size_t i) const{ return (reinterpret_cast<const T*>(_buffer))[i]; }
+
+		// Use an opaque buffer to *ensure* the compiler doesn't call a constructor.
+ // The size 1 buffer is assumed to aligned to the actual members so that the + // elem() + char _buffer[1]; + }; + + template + struct _swizzle_base1 : public _swizzle_base0 + { + }; + + template + struct _swizzle_base1<2, T, Q, E0,E1,-1,-2, Aligned> : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec<2, T, Q> operator ()() const { return vec<2, T, Q>(this->elem(E0), this->elem(E1)); } + }; + + template + struct _swizzle_base1<3, T, Q, E0,E1,E2,-1, Aligned> : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec<3, T, Q> operator ()() const { return vec<3, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2)); } + }; + + template + struct _swizzle_base1<4, T, Q, E0,E1,E2,E3, Aligned> : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec<4, T, Q> operator ()() const { return vec<4, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2), this->elem(E3)); } + }; + + // Internal class for implementing swizzle operators + /* + Template parameters: + + T = type of scalar values (e.g. float, double) + N = number of components in the vector (e.g. 3) + E0...3 = what index the n-th element of this swizzle refers to in the unswizzled vec + + DUPLICATE_ELEMENTS = 1 if there is a repeated element, 0 otherwise (used to specialize swizzles + containing duplicate elements so that they cannot be used as r-values). + */ + template + struct _swizzle_base2 : public _swizzle_base1::value> + { + struct op_equal + { + GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e = t; } + }; + + struct op_minus + { + GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e -= t; } + }; + + struct op_plus + { + GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e += t; } + }; + + struct op_mul + { + GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e *= t; } + }; + + struct op_div + { + GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e /= t; } + }; + + public: + GLM_FUNC_QUALIFIER _swizzle_base2& operator= (const T& t) + { + for (int i = 0; i < N; ++i) + (*this)[i] = t; + return *this; + } + + GLM_FUNC_QUALIFIER _swizzle_base2& operator= (vec const& that) + { + _apply_op(that, op_equal()); + return *this; + } + + GLM_FUNC_QUALIFIER void operator -= (vec const& that) + { + _apply_op(that, op_minus()); + } + + GLM_FUNC_QUALIFIER void operator += (vec const& that) + { + _apply_op(that, op_plus()); + } + + GLM_FUNC_QUALIFIER void operator *= (vec const& that) + { + _apply_op(that, op_mul()); + } + + GLM_FUNC_QUALIFIER void operator /= (vec const& that) + { + _apply_op(that, op_div()); + } + + GLM_FUNC_QUALIFIER T& operator[](size_t i) + { + const int offset_dst[4] = { E0, E1, E2, E3 }; + return this->elem(offset_dst[i]); + } + GLM_FUNC_QUALIFIER T operator[](size_t i) const + { + const int offset_dst[4] = { E0, E1, E2, E3 }; + return this->elem(offset_dst[i]); + } + + protected: + template + GLM_FUNC_QUALIFIER void _apply_op(vec const& that, const U& op) + { + // Make a copy of the data in this == &that. + // The copier should optimize out the copy in cases where the function is + // properly inlined and the copy is not necessary. + T t[N]; + for (int i = 0; i < N; ++i) + t[i] = that[i]; + for (int i = 0; i < N; ++i) + op( (*this)[i], t[i] ); + } + }; + + // Specialization for swizzles containing duplicate elements. These cannot be modified. 
+	// Specialization for swizzles containing duplicate elements. These cannot be modified.
+	template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3>
+	struct _swizzle_base2<N, T, Q, E0,E1,E2,E3, 1> : public _swizzle_base1<N, T, Q, E0,E1,E2,E3, detail::is_aligned<Q>::value>
+	{
+		struct Stub {};
+
+		GLM_FUNC_QUALIFIER _swizzle_base2& operator= (Stub const&) { return *this; }
+
+		GLM_FUNC_QUALIFIER T operator[] (size_t i) const
+		{
+			const int offset_dst[4] = { E0, E1, E2, E3 };
+			return this->elem(offset_dst[i]);
+		}
+	};
+
+	template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3>
+	struct _swizzle : public _swizzle_base2<N, T, Q, E0, E1, E2, E3, (E0 == E1 || E0 == E2 || E0 == E3 || E1 == E2 || E1 == E3 || E2 == E3)>
+	{
+		typedef _swizzle_base2<N, T, Q, E0, E1, E2, E3, (E0 == E1 || E0 == E2 || E0 == E3 || E1 == E2 || E1 == E3 || E2 == E3)> base_type;
+
+		using base_type::operator=;
+
+		GLM_FUNC_QUALIFIER operator vec<N, T, Q> () const { return (*this)(); }
+	};
+
+//
+// To prevent the C++ syntax from getting entirely overwhelming, define some alias macros
+//
+#define GLM_SWIZZLE_TEMPLATE1	template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3>
+#define GLM_SWIZZLE_TEMPLATE2	template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3, int F0, int F1, int F2, int F3>
+#define GLM_SWIZZLE_TYPE1	_swizzle<N, T, Q, E0, E1, E2, E3>
+#define GLM_SWIZZLE_TYPE2	_swizzle<N, T, Q, F0, F1, F2, F3>
+
+//
+// Wrapper for a binary operator (e.g. u.yy + v.zy)
+//
+#define GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND)	\
+	GLM_SWIZZLE_TEMPLATE2	\
+	GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b)	\
+	{	\
+		return a() OPERAND b();	\
+	}	\
+	GLM_SWIZZLE_TEMPLATE1	\
+	GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const vec<N, T, Q>& b)	\
+	{	\
+		return a() OPERAND b;	\
+	}	\
+	GLM_SWIZZLE_TEMPLATE1	\
+	GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const vec<N, T, Q>& a, const GLM_SWIZZLE_TYPE1& b)	\
+	{	\
+		return a OPERAND b();	\
+	}
+
+//
+// Wrapper for an operand between a swizzle and a scalar (e.g. 1.0f - u.xyz)
+//
+#define GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND)	\
+	GLM_SWIZZLE_TEMPLATE1	\
+	GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const T& b)	\
+	{	\
+		return a() OPERAND b;	\
+	}	\
+	GLM_SWIZZLE_TEMPLATE1	\
+	GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const T& a, const GLM_SWIZZLE_TYPE1& b)	\
+	{	\
+		return a OPERAND b();	\
+	}
+
+//
+// Macro for wrapping a function taking one argument (e.g. abs())
+//
+#define GLM_SWIZZLE_FUNCTION_1_ARGS(RETURN_TYPE,FUNCTION)	\
+	GLM_SWIZZLE_TEMPLATE1	\
+	GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a)	\
+	{	\
+		return FUNCTION(a());	\
+	}
+
+//
+// Macro for wrapping a function taking two vector arguments (e.g. dot()).
+//
+#define GLM_SWIZZLE_FUNCTION_2_ARGS(RETURN_TYPE,FUNCTION)	\
+	GLM_SWIZZLE_TEMPLATE2	\
+	GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b)	\
+	{	\
+		return FUNCTION(a(), b());	\
+	}	\
+	GLM_SWIZZLE_TEMPLATE1	\
+	GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b)	\
+	{	\
+		return FUNCTION(a(), b());	\
+	}	\
+	GLM_SWIZZLE_TEMPLATE1	\
+	GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename V& b)	\
+	{	\
+		return FUNCTION(a(), b);	\
+	}	\
+	GLM_SWIZZLE_TEMPLATE1	\
+	GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const V& a, const GLM_SWIZZLE_TYPE1& b)	\
+	{	\
+		return FUNCTION(a, b());	\
+	}
+
+//
+// Macro for wrapping a function taking 2 vec arguments followed by a scalar (e.g. mix()).
+// +#define GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(RETURN_TYPE,FUNCTION) \ + GLM_SWIZZLE_TEMPLATE2 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b, const T& c) \ + { \ + return FUNCTION(a(), b(), c); \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \ + { \ + return FUNCTION(a(), b(), c); \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename S0::vec_type& b, const T& c)\ + { \ + return FUNCTION(a(), b, c); \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const typename V& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \ + { \ + return FUNCTION(a, b(), c); \ + } + +}//namespace detail +}//namespace glm + +namespace glm +{ + namespace detail + { + GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(-) + GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(*) + GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(+) + GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(-) + GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(*) + GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(/) + } + + // + // Swizzles are distinct types from the unswizzled type. The below macros will + // provide template specializations for the swizzle types for the given functions + // so that the compiler does not have any ambiguity to choosing how to handle + // the function. + // + // The alternative is to use the operator()() when calling the function in order + // to explicitly convert the swizzled type to the unswizzled type. + // + + //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, abs); + //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acos); + //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acosh); + //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, all); + //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, any); + + //GLM_SWIZZLE_FUNCTION_2_ARGS(value_type, dot); + //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, cross); + //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, step); + //GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(vec_type, mix); +} + +#define GLM_SWIZZLE2_2_MEMBERS(T, Q, E0,E1) \ + struct { detail::_swizzle<2, T, Q, 0,0,-1,-2> E0 ## E0; }; \ + struct { detail::_swizzle<2, T, Q, 0,1,-1,-2> E0 ## E1; }; \ + struct { detail::_swizzle<2, T, Q, 1,0,-1,-2> E1 ## E0; }; \ + struct { detail::_swizzle<2, T, Q, 1,1,-1,-2> E1 ## E1; }; + +#define GLM_SWIZZLE2_3_MEMBERS(T, Q, E0,E1) \ + struct { detail::_swizzle<3,T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<3,T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<3,T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<3,T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<3,T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<3,T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<3,T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<3,T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; + +#define GLM_SWIZZLE2_4_MEMBERS(T, Q, E0,E1) \ + struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 
## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; + +#define GLM_SWIZZLE3_2_MEMBERS(T, Q, E0,E1,E2) \ + struct { detail::_swizzle<2,T, Q, 0,0,-1,-2> E0 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 0,1,-1,-2> E0 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 0,2,-1,-2> E0 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 1,0,-1,-2> E1 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 1,1,-1,-2> E1 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 1,2,-1,-2> E1 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 2,0,-1,-2> E2 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 2,1,-1,-2> E2 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 2,2,-1,-2> E2 ## E2; }; + +#define GLM_SWIZZLE3_3_MEMBERS(T, Q ,E0,E1,E2) \ + struct { detail::_swizzle<3, T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,0,2,-1> E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,2,-1> E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,0,-1> E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,1,-1> E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,2,-1> E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,2,-1> E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,2,-1> E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,0,-1> E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,1,-1> E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,2,-1> E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,0,-1> E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,1,-1> E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,2,-1> E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,0,-1> E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,1,-1> E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,2,-1> E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,0,-1> E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,1,-1> E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,2,-1> E2 ## E2 ## E2; }; + +#define GLM_SWIZZLE3_4_MEMBERS(T, Q, E0,E1,E2) \ + struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## 
E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \ + 
struct { detail::_swizzle<4,T, Q, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,2,2> E2 ## E2 ## E2 ## E2; }; + +#define GLM_SWIZZLE4_2_MEMBERS(T, Q, E0,E1,E2,E3) \ + struct { detail::_swizzle<2,T, Q, 0,0,-1,-2> E0 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 0,1,-1,-2> E0 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 0,2,-1,-2> E0 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 0,3,-1,-2> E0 ## E3; }; \ + struct { detail::_swizzle<2,T, Q, 1,0,-1,-2> E1 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 1,1,-1,-2> E1 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 1,2,-1,-2> E1 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 1,3,-1,-2> E1 ## E3; }; \ + struct { detail::_swizzle<2,T, Q, 2,0,-1,-2> E2 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 2,1,-1,-2> E2 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 2,2,-1,-2> E2 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 2,3,-1,-2> E2 ## E3; }; \ + struct { detail::_swizzle<2,T, Q, 3,0,-1,-2> E3 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 3,1,-1,-2> E3 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 3,2,-1,-2> E3 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 3,3,-1,-2> E3 ## E3; }; + +#define GLM_SWIZZLE4_3_MEMBERS(T, Q, E0,E1,E2,E3) \ + struct { detail::_swizzle<3, T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,0,2,-1> E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,0,3,-1> E0 ## E0 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, 
Q, 0,1,2,-1> E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,3,-1> E0 ## E1 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,0,-1> E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,1,-1> E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,2,-1> E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,3,-1> E0 ## E2 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 0,3,0,-1> E0 ## E3 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,3,1,-1> E0 ## E3 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,3,2,-1> E0 ## E3 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,3,3,-1> E0 ## E3 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,2,-1> E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,3,-1> E1 ## E0 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,2,-1> E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,3,-1> E1 ## E1 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,0,-1> E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,1,-1> E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,2,-1> E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,3,-1> E1 ## E2 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 1,3,0,-1> E1 ## E3 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,3,1,-1> E1 ## E3 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,3,2,-1> E1 ## E3 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,3,3,-1> E1 ## E3 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,0,-1> E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,1,-1> E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,2,-1> E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,3,-1> E2 ## E0 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,0,-1> E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,1,-1> E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,2,-1> E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,3,-1> E2 ## E1 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,0,-1> E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,1,-1> E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,2,-1> E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,3,-1> E2 ## E2 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 2,3,0,-1> E2 ## E3 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,3,1,-1> E2 ## E3 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,3,2,-1> E2 ## E3 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,3,3,-1> E2 ## E3 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 3,0,0,-1> E3 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 3,0,1,-1> E3 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 3,0,2,-1> E3 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 3,0,3,-1> E3 ## E0 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 3,1,0,-1> E3 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 3,1,1,-1> E3 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 3,1,2,-1> E3 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 3,1,3,-1> E3 ## E1 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 3,2,0,-1> E3 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 3,2,1,-1> E3 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 3,2,2,-1> E3 ## 
E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 3,2,3,-1> E3 ## E2 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 3,3,0,-1> E3 ## E3 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 3,3,1,-1> E3 ## E3 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 3,3,2,-1> E3 ## E3 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 3,3,3,-1> E3 ## E3 ## E3; }; + +#define GLM_SWIZZLE4_4_MEMBERS(T, Q, E0,E1,E2,E3) \ + struct { detail::_swizzle<4, T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,0,3> E0 ## E0 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,1,3> E0 ## E0 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,2,3> E0 ## E0 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,3,0> E0 ## E0 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,3,1> E0 ## E0 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,3,2> E0 ## E0 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,3,3> E0 ## E0 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,0,3> E0 ## E1 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,1,3> E0 ## E1 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,2,3> E0 ## E1 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,3,0> E0 ## E1 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,3,1> E0 ## E1 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,3,2> E0 ## E1 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,3,3> E0 ## E1 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,0,3> E0 ## E2 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,1,3> E0 ## E2 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \ + 
struct { detail::_swizzle<4, T, Q, 0,2,2,3> E0 ## E2 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,3,0> E0 ## E2 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,3,1> E0 ## E2 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,3,2> E0 ## E2 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,3,3> E0 ## E2 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,0,0> E0 ## E3 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,0,1> E0 ## E3 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,0,2> E0 ## E3 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,0,3> E0 ## E3 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,1,0> E0 ## E3 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,1,1> E0 ## E3 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,1,2> E0 ## E3 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,1,3> E0 ## E3 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,2,0> E0 ## E3 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,2,1> E0 ## E3 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,2,2> E0 ## E3 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,2,3> E0 ## E3 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,3,0> E0 ## E3 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,3,1> E0 ## E3 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,3,2> E0 ## E3 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,3,3> E0 ## E3 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,0,3> E1 ## E0 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,1,3> E1 ## E0 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,2,3> E1 ## E0 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,3,0> E1 ## E0 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,3,1> E1 ## E0 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,3,2> E1 ## E0 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,3,3> E1 ## E0 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,0,3> E1 ## E1 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,1,3> E1 ## E1 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,2,3> E1 ## 
E1 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,3,0> E1 ## E1 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,3,1> E1 ## E1 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,3,2> E1 ## E1 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,3,3> E1 ## E1 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,0,3> E1 ## E2 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,1,3> E1 ## E2 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,2,3> E1 ## E2 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,3,0> E1 ## E2 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,3,1> E1 ## E2 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,3,2> E1 ## E2 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,3,3> E1 ## E2 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,0,0> E1 ## E3 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,0,1> E1 ## E3 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,0,2> E1 ## E3 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,0,3> E1 ## E3 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,1,0> E1 ## E3 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,1,1> E1 ## E3 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,1,2> E1 ## E3 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,1,3> E1 ## E3 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,2,0> E1 ## E3 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,2,1> E1 ## E3 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,2,2> E1 ## E3 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,2,3> E1 ## E3 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,3,0> E1 ## E3 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,3,1> E1 ## E3 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,3,2> E1 ## E3 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,3,3> E1 ## E3 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,0,3> E2 ## E0 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,1,3> E2 ## E0 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,2,3> E2 ## E0 ## E2 ## E3; }; \ + struct { 
detail::_swizzle<4, T, Q, 2,0,3,0> E2 ## E0 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,3,1> E2 ## E0 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,3,2> E2 ## E0 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,3,3> E2 ## E0 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,0,3> E2 ## E1 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,1,3> E2 ## E1 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,2,3> E2 ## E1 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,3,0> E2 ## E1 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,3,1> E2 ## E1 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,3,2> E2 ## E1 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,3,3> E2 ## E1 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,0,3> E2 ## E2 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,1,3> E2 ## E2 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,2,2> E2 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,2,3> E2 ## E2 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,3,0> E2 ## E2 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,3,1> E2 ## E2 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,3,2> E2 ## E2 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,3,3> E2 ## E2 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,0,0> E2 ## E3 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,0,1> E2 ## E3 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,0,2> E2 ## E3 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,0,3> E2 ## E3 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,1,0> E2 ## E3 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,1,1> E2 ## E3 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,1,2> E2 ## E3 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,1,3> E2 ## E3 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,2,0> E2 ## E3 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,2,1> E2 ## E3 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,2,2> E2 ## E3 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,2,3> E2 ## E3 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,3,0> E2 ## E3 ## E3 
## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,3,1> E2 ## E3 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,3,2> E2 ## E3 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,3,3> E2 ## E3 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,0,0> E3 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,0,1> E3 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,0,2> E3 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,0,3> E3 ## E0 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,1,0> E3 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,1,1> E3 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,1,2> E3 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,1,3> E3 ## E0 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,2,0> E3 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,2,1> E3 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,2,2> E3 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,2,3> E3 ## E0 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,3,0> E3 ## E0 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,3,1> E3 ## E0 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,3,2> E3 ## E0 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,3,3> E3 ## E0 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,0,0> E3 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,0,1> E3 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,0,2> E3 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,0,3> E3 ## E1 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,1,0> E3 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,1,1> E3 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,1,2> E3 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,1,3> E3 ## E1 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,2,0> E3 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,2,1> E3 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,2,2> E3 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,2,3> E3 ## E1 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,3,0> E3 ## E1 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,3,1> E3 ## E1 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,3,2> E3 ## E1 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,3,3> E3 ## E1 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,0,0> E3 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,0,1> E3 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,0,2> E3 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,0,3> E3 ## E2 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,1,0> E3 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,1,1> E3 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,1,2> E3 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,1,3> E3 ## E2 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,2,0> E3 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,2,1> E3 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,2,2> E3 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,2,3> E3 ## E2 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,3,0> E3 ## E2 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 
3,2,3,1> E3 ## E2 ## E3 ## E1; }; \
+	struct { detail::_swizzle<4, T, Q, 3,2,3,2> E3 ## E2 ## E3 ## E2; }; \
+	struct { detail::_swizzle<4, T, Q, 3,2,3,3> E3 ## E2 ## E3 ## E3; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,0,0> E3 ## E3 ## E0 ## E0; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,0,1> E3 ## E3 ## E0 ## E1; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,0,2> E3 ## E3 ## E0 ## E2; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,0,3> E3 ## E3 ## E0 ## E3; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,1,0> E3 ## E3 ## E1 ## E0; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,1,1> E3 ## E3 ## E1 ## E1; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,1,2> E3 ## E3 ## E1 ## E2; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,1,3> E3 ## E3 ## E1 ## E3; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,2,0> E3 ## E3 ## E2 ## E0; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,2,1> E3 ## E3 ## E2 ## E1; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,2,2> E3 ## E3 ## E2 ## E2; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,2,3> E3 ## E3 ## E2 ## E3; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,3,0> E3 ## E3 ## E3 ## E0; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,3,1> E3 ## E3 ## E3 ## E1; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,3,2> E3 ## E3 ## E3 ## E2; }; \
+	struct { detail::_swizzle<4, T, Q, 3,3,3,3> E3 ## E3 ## E3 ## E3; };
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/_swizzle_func.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/_swizzle_func.hpp
new file mode 100644
index 000000000000..a264ae9eff3a
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/_swizzle_func.hpp
@@ -0,0 +1,682 @@
+#pragma once
+
+#define GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, CONST, A, B)	\
+	GLM_FUNC_QUALIFIER vec<2, T, Q> A ## B() CONST	\
+	{	\
+		return vec<2, T, Q>(this->A, this->B);	\
+	}
+
+#define GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, CONST, A, B, C)	\
+	GLM_FUNC_QUALIFIER vec<3, T, Q> A ## B ## C() CONST	\
+	{	\
+		return vec<3, T, Q>(this->A, this->B, this->C);	\
+	}
+
+#define GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, CONST, A, B, C, D)	\
+	GLM_FUNC_QUALIFIER vec<4, T, Q> A ## B ## C ## D() CONST	\
+	{	\
+		return vec<4, T, Q>(this->A, this->B, this->C, this->D);	\
+	}
+
+#define GLM_SWIZZLE_GEN_VEC2_ENTRY_DEF(T, P, L, CONST, A, B)	\
+	template<typename T>	\
+	GLM_FUNC_QUALIFIER vec<2, T, Q> vec<L, T, Q>::A ## B() CONST	\
+	{	\
+		return vec<2, T, Q>(this->A, this->B);	\
+	}
+
+#define GLM_SWIZZLE_GEN_VEC3_ENTRY_DEF(T, P, L, CONST, A, B, C)	\
+	template<typename T>	\
+	GLM_FUNC_QUALIFIER vec<3, T, Q> vec<L, T, Q>::A ## B ## C() CONST	\
+	{	\
+		return vec<3, T, Q>(this->A, this->B, this->C);	\
+	}
+
+#define GLM_SWIZZLE_GEN_VEC4_ENTRY_DEF(T, P, L, CONST, A, B, C, D)	\
+	template<typename T>	\
+	GLM_FUNC_QUALIFIER vec<4, T, Q> vec<L, T, Q>::A ## B ## C ## D() CONST	\
+	{	\
+		return vec<4, T, Q>(this->A, this->B, this->C, this->D);	\
+	}
+
+#define GLM_MUTABLE
+
+#define GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, A, B)	\
+	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 2, GLM_MUTABLE, A, B)	\
+	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 2, GLM_MUTABLE, B, A)
+
+#define GLM_SWIZZLE_GEN_REF_FROM_VEC2(T, P)	\
+	GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, x, y)	\
+	GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, r, g)	\
+	GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, s, t)
+
+#define GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(T, P, A, B, C)	\
+	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, B)	\
+	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, C)	\
+	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, A)	\
+	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, C)	\
+	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P,
GLM_MUTABLE, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, B) + +#define GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, C, B, A) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(T, P, A, B, C) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC3(T, P) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, x, y, z) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, r, g, b) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, s, t, p) + +#define GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, C) + +#define GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, C, B) + +#define GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, 
P, , C, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, B, C, A) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC4(T, P) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, x, y, z, w) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, r, g, b, a) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, s, t, p, q) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(T, P, A, B) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, P) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, x, y) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, r, g) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, s, t) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, C) \ + 
GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, C) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, C) + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, C) 
\ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, C) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(T, P, A, B, C) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, P) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, x, y, z) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, r, g, b) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, s, t, p) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, C) \ + 
GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, D) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, A) \ + 
GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, D) + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, 
const, A, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, C) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, B) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, A) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, D) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, P) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, x, y, z, w) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, r, g, b, a) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, s, t, p, q) + diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/_vectorize.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/_vectorize.hpp new file mode 100644 index 000000000000..1fcaec315284 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/_vectorize.hpp @@ -0,0 +1,162 @@ +#pragma once + +namespace glm{ +namespace detail +{ + template class vec, length_t L, typename R, typename T, qualifier Q> + struct functor1{}; + + template class vec, typename R, typename T, qualifier Q> + struct functor1 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<1, R, Q> call(R (*Func) (T x), vec<1, T, Q> const& v) + { + return vec<1, R, Q>(Func(v.x)); + } + }; + + template class vec, typename R, typename T, qualifier Q> + struct functor1 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<2, R, Q> call(R (*Func) (T x), vec<2, T, Q> const& v) + { + return vec<2, R, Q>(Func(v.x), Func(v.y)); + } + }; + + template class vec, typename R, typename T, qualifier Q> + struct functor1 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, R, Q> call(R (*Func) (T x), vec<3, T, Q> const& v) + { + return vec<3, R, Q>(Func(v.x), Func(v.y), Func(v.z)); + } + }; + + template class vec, typename R, typename T, qualifier Q> + struct functor1 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, R, Q> call(R (*Func) (T x), vec<4, T, Q> const& v) + { + return vec<4, R, Q>(Func(v.x), Func(v.y), Func(v.z), Func(v.w)); + } + }; + + template class vec, length_t L, typename T, qualifier Q> + struct functor2{}; + + template class vec, typename T, qualifier Q> + struct functor2 + { + GLM_FUNC_QUALIFIER static vec<1, T, Q> call(T (*Func) (T x, T y), vec<1, T, Q> const& a, vec<1, T, Q> const& b) + { + return vec<1, T, Q>(Func(a.x, b.x)); + } + }; + + template class vec, typename T, qualifier Q> + struct functor2 + { + GLM_FUNC_QUALIFIER static vec<2, T, Q> call(T (*Func) (T x, T y), vec<2, T, Q> const& a, vec<2, T, Q> const& b) + { + return vec<2, T, Q>(Func(a.x, b.x), Func(a.y, b.y)); + } + }; + + template class vec, typename T, qualifier Q> + struct functor2 + { + GLM_FUNC_QUALIFIER static vec<3, T, Q> call(T (*Func) (T x, T y), vec<3, T, Q> const& a, vec<3, T, Q> const& b) + { + return vec<3, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z)); + } + }; + + template class vec, typename T, qualifier Q> + struct functor2 + { + GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, 
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/_vectorize.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/_vectorize.hpp
new file mode 100644
index 000000000000..1fcaec315284
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/_vectorize.hpp
@@ -0,0 +1,162 @@
+#pragma once
+
+namespace glm{
+namespace detail
+{
+	template<template<length_t L, typename T, qualifier Q> class vec, length_t L, typename R, typename T, qualifier Q>
+	struct functor1{};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
+	struct functor1<vec, 1, R, T, Q>
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<1, R, Q> call(R (*Func) (T x), vec<1, T, Q> const& v)
+		{
+			return vec<1, R, Q>(Func(v.x));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
+	struct functor1<vec, 2, R, T, Q>
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<2, R, Q> call(R (*Func) (T x), vec<2, T, Q> const& v)
+		{
+			return vec<2, R, Q>(Func(v.x), Func(v.y));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
+	struct functor1<vec, 3, R, T, Q>
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, R, Q> call(R (*Func) (T x), vec<3, T, Q> const& v)
+		{
+			return vec<3, R, Q>(Func(v.x), Func(v.y), Func(v.z));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
+	struct functor1<vec, 4, R, T, Q>
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, R, Q> call(R (*Func) (T x), vec<4, T, Q> const& v)
+		{
+			return vec<4, R, Q>(Func(v.x), Func(v.y), Func(v.z), Func(v.w));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, length_t L, typename T, qualifier Q>
+	struct functor2{};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+	struct functor2<vec, 1, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<1, T, Q> call(T (*Func) (T x, T y), vec<1, T, Q> const& a, vec<1, T, Q> const& b)
+		{
+			return vec<1, T, Q>(Func(a.x, b.x));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+	struct functor2<vec, 2, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<2, T, Q> call(T (*Func) (T x, T y), vec<2, T, Q> const& a, vec<2, T, Q> const& b)
+		{
+			return vec<2, T, Q>(Func(a.x, b.x), Func(a.y, b.y));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+	struct functor2<vec, 3, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<3, T, Q> call(T (*Func) (T x, T y), vec<3, T, Q> const& a, vec<3, T, Q> const& b)
+		{
+			return vec<3, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+	struct functor2<vec, 4, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+		{
+			return vec<4, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, length_t L, typename T, qualifier Q>
+	struct functor2_vec_sca{};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+	struct functor2_vec_sca<vec, 1, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<1, T, Q> call(T (*Func) (T x, T y), vec<1, T, Q> const& a, T b)
+		{
+			return vec<1, T, Q>(Func(a.x, b));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+	struct functor2_vec_sca<vec, 2, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<2, T, Q> call(T (*Func) (T x, T y), vec<2, T, Q> const& a, T b)
+		{
+			return vec<2, T, Q>(Func(a.x, b), Func(a.y, b));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+	struct functor2_vec_sca<vec, 3, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<3, T, Q> call(T (*Func) (T x, T y), vec<3, T, Q> const& a, T b)
+		{
+			return vec<3, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b));
+		}
+	};
+
+	template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
+	struct functor2_vec_sca<vec, 4, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, T b)
+		{
+			return vec<4, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b), Func(a.w, b));
+		}
+	};
+
+	template<length_t L, typename T, qualifier Q>
+	struct functor2_vec_int {};
+
+	template<typename T, qualifier Q>
+	struct functor2_vec_int<1, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<1, int, Q> call(int (*Func) (T x, int y), vec<1, T, Q> const& a, vec<1, int, Q> const& b)
+		{
+			return vec<1, int, Q>(Func(a.x, b.x));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct functor2_vec_int<2, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<2, int, Q> call(int (*Func) (T x, int y), vec<2, T, Q> const& a, vec<2, int, Q> const& b)
+		{
+			return vec<2, int, Q>(Func(a.x, b.x), Func(a.y, b.y));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct functor2_vec_int<3, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<3, int, Q> call(int (*Func) (T x, int y), vec<3, T, Q> const& a, vec<3, int, Q> const& b)
+		{
+			return vec<3, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct functor2_vec_int<4, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, int, Q> call(int (*Func) (T x, int y), vec<4, T, Q> const& a, vec<4, int, Q> const& b)
+		{
+			return vec<4, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w));
+		}
+	};
+}//namespace detail
+}//namespace glm
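The functorN helpers above are the dispatch glue used throughout the core .inl files that follow: functor1 lifts a scalar function R (*)(T) over each component of a vector, functor2 zips two vectors through T (*)(T, T), functor2_vec_sca pairs a vector with a scalar, and functor2_vec_int covers functions whose second operand is an integer vector. A short sketch of the pattern, assuming the default qualifier (glm::defaultp); lifted_sqrt is a hypothetical helper name:

	#include <cmath>
	#include <glm/glm.hpp>

	// The same dispatch compute_sqrt performs in func_exponential.inl:
	// functor1<vec, 3, float, float, Q>::call(f, v) expands to
	// vec<3, float, Q>(f(v.x), f(v.y), f(v.z)).
	glm::vec3 lifted_sqrt(glm::vec3 const& v)
	{
		return glm::detail::functor1<glm::vec, 3, float, float, glm::defaultp>::call(std::sqrt, v);
	}
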
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/compute_common.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/compute_common.hpp
new file mode 100644
index 000000000000..83362bc7b388
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/compute_common.hpp
@@ -0,0 +1,50 @@
+#pragma once
+
+#include "setup.hpp"
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+	template<typename genFIType, bool /*signed*/>
+	struct compute_abs
+	{};
+
+	template<typename genFIType>
+	struct compute_abs<genFIType, true>
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x)
+		{
+			GLM_STATIC_ASSERT(
+				std::numeric_limits<genFIType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits<genFIType>::is_signed,
+				"'abs' only accept floating-point and integer scalar or vector inputs");
+
+			return x >= genFIType(0) ? x : -x;
+			// TODO, perf comp with: *(((int *) &x) + 1) &= 0x7fffffff;
+		}
+	};
+
+#if (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)
+	template<>
+	struct compute_abs<float, true>
+	{
+		GLM_FUNC_QUALIFIER static float call(float x)
+		{
+			return fabsf(x);
+		}
+	};
+#endif
+
+	template<typename genFIType>
+	struct compute_abs<genFIType, false>
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x)
+		{
+			GLM_STATIC_ASSERT(
+				(!std::numeric_limits<genFIType>::is_signed && std::numeric_limits<genFIType>::is_integer),
+				"'abs' only accept floating-point and integer scalar or vector inputs");
+			return x;
+		}
+	};
+}//namespace detail
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/compute_vector_relational.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/compute_vector_relational.hpp
new file mode 100644
index 000000000000..167b6345dd39
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/compute_vector_relational.hpp
@@ -0,0 +1,30 @@
+#pragma once
+
+//#include "compute_common.hpp"
+#include "setup.hpp"
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+	template<typename T, bool isFloat>
+	struct compute_equal
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b)
+		{
+			return a == b;
+		}
+	};
+/*
+	template<typename T>
+	struct compute_equal<T, true>
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b)
+		{
+			return detail::compute_abs<T, std::numeric_limits<T>::is_signed>::call(b - a) <= static_cast<T>(0);
+			//return std::memcmp(&a, &b, sizeof(T)) == 0;
+		}
+	};
+*/
+}//namespace detail
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_common.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_common.inl
new file mode 100644
index 000000000000..c90ba227ae65
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_common.inl
@@ -0,0 +1,792 @@
+/// @ref core
+/// @file glm/detail/func_common.inl
+
+#include "../vector_relational.hpp"
+#include "compute_common.hpp"
+#include "type_vec1.hpp"
+#include "type_vec2.hpp"
+#include "type_vec3.hpp"
+#include "type_vec4.hpp"
+#include "_vectorize.hpp"
+#include <limits>
+
+namespace glm
+{
+	// min
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType min(genType x, genType y)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits<genType>::is_integer, "'min' only accept floating-point or integer inputs");
+		return (y < x) ? y : x;
+	}
+
+	// max
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType max(genType x, genType y)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits<genType>::is_integer, "'max' only accept floating-point or integer inputs");
+
+		return (x < y) ? y : x;
+	}
+
+	// abs
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR int abs(int x)
+	{
+		int const y = x >> (sizeof(int) * 8 - 1);
+		return (x ^ y) - y;
+	}
+
+	// round
+#	if GLM_HAS_CXX11_STL
+		using ::std::round;
+#	else
+		template<typename genType>
+		GLM_FUNC_QUALIFIER genType round(genType x)
+		{
+			GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'round' only accept floating-point inputs");
+
+			return x < static_cast<genType>(0) ? static_cast<genType>(int(x - static_cast<genType>(0.5))) : static_cast<genType>(int(x + static_cast<genType>(0.5)));
+		}
+#	endif
+
+	// trunc
+#	if GLM_HAS_CXX11_STL
+		using ::std::trunc;
+#	else
+		template<typename genType>
+		GLM_FUNC_QUALIFIER genType trunc(genType x)
+		{
+			GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'trunc' only accept floating-point inputs");
+
+			return x < static_cast<genType>(0) ?
-std::floor(-x) : std::floor(x); + } +# endif + +}//namespace glm + +namespace glm{ +namespace detail +{ + template + struct compute_abs_vector + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& x) + { + return detail::functor1::call(abs, x); + } + }; + + template + struct compute_mix_vector + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& x, vec const& y, vec const& a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); + + return vec(vec(x) * (static_cast(1) - a) + vec(y) * a); + } + }; + + template + struct compute_mix_vector + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& x, vec const& y, vec const& a) + { + vec Result; + for(length_t i = 0; i < x.length(); ++i) + Result[i] = a[i] ? y[i] : x[i]; + return Result; + } + }; + + template + struct compute_mix_scalar + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& x, vec const& y, U const& a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); + + return vec(vec(x) * (static_cast(1) - a) + vec(y) * a); + } + }; + + template + struct compute_mix_scalar + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& x, vec const& y, bool const& a) + { + return a ? y : x; + } + }; + + template + struct compute_mix + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(T const& x, T const& y, U const& a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); + + return static_cast(static_cast(x) * (static_cast(1) - a) + static_cast(y) * a); + } + }; + + template + struct compute_mix + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(T const& x, T const& y, bool const& a) + { + return a ? y : x; + } + }; + + template + struct compute_sign + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& x) + { + return vec(glm::lessThan(vec(0), x)) - vec(glm::lessThan(x, vec(0))); + } + }; + +# if GLM_ARCH == GLM_ARCH_X86 + template + struct compute_sign + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& x) + { + T const Shift(static_cast(sizeof(T) * 8 - 1)); + vec const y(vec::type, Q>(-x) >> typename detail::make_unsigned::type(Shift)); + + return (x >> Shift) | y; + } + }; +# endif + + template + struct compute_floor + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(std::floor, x); + } + }; + + template + struct compute_ceil + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(std::ceil, x); + } + }; + + template + struct compute_fract + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return x - floor(x); + } + }; + + template + struct compute_trunc + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(trunc, x); + } + }; + + template + struct compute_round + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(round, x); + } + }; + + template + struct compute_mod + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'mod' only accept floating-point inputs. 
Include for integer inputs."); + return a - b * floor(a / b); + } + }; + + template + struct compute_min_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y) + { + return detail::functor2::call(min, x, y); + } + }; + + template + struct compute_max_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y) + { + return detail::functor2::call(max, x, y); + } + }; + + template + struct compute_clamp_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& minVal, vec const& maxVal) + { + return min(max(x, minVal), maxVal); + } + }; + + template + struct compute_step_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& edge, vec const& x) + { + return mix(vec(1), vec(0), glm::lessThan(x, edge)); + } + }; + + template + struct compute_smoothstep_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& edge0, vec const& edge1, vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'smoothstep' only accept floating-point inputs"); + vec const tmp(clamp((x - edge0) / (edge1 - edge0), static_cast(0), static_cast(1))); + return tmp * tmp * (static_cast(3) - static_cast(2) * tmp); + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genFIType abs(genFIType x) + { + return detail::compute_abs::is_signed>::call(x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec abs(vec const& x) + { + return detail::compute_abs_vector::value>::call(x); + } + + // sign + // fast and works for any type + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genFIType sign(genFIType x) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || (std::numeric_limits::is_signed && std::numeric_limits::is_integer), + "'sign' only accept signed inputs"); + + return detail::compute_sign<1, genFIType, defaultp, + std::numeric_limits::is_iec559, detail::is_aligned::value>::call(vec<1, genFIType>(x)).x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec sign(vec const& x) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || (std::numeric_limits::is_signed && std::numeric_limits::is_integer), + "'sign' only accept signed inputs"); + + return detail::compute_sign::is_iec559, detail::is_aligned::value>::call(x); + } + + // floor + using ::std::floor; + template + GLM_FUNC_QUALIFIER vec floor(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'floor' only accept floating-point inputs."); + return detail::compute_floor::value>::call(x); + } + + template + GLM_FUNC_QUALIFIER vec trunc(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'trunc' only accept floating-point inputs"); + return detail::compute_trunc::value>::call(x); + } + + template + GLM_FUNC_QUALIFIER vec round(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'round' only accept floating-point inputs"); + return detail::compute_round::value>::call(x); + } + +/* + // roundEven + template + GLM_FUNC_QUALIFIER genType roundEven(genType const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'roundEven' only accept floating-point inputs"); + + return genType(int(x + genType(int(x) % 2))); + } +*/ + + // roundEven + template + GLM_FUNC_QUALIFIER genType roundEven(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 
|| GLM_CONFIG_UNRESTRICTED_FLOAT, "'roundEven' only accept floating-point inputs"); + + int Integer = static_cast(x); + genType IntegerPart = static_cast(Integer); + genType FractionalPart = fract(x); + + if(FractionalPart > static_cast(0.5) || FractionalPart < static_cast(0.5)) + { + return round(x); + } + else if((Integer % 2) == 0) + { + return IntegerPart; + } + else if(x <= static_cast(0)) // Work around... + { + return IntegerPart - static_cast(1); + } + else + { + return IntegerPart + static_cast(1); + } + //else // Bug on MinGW 4.5.2 + //{ + // return mix(IntegerPart + genType(-1), IntegerPart + genType(1), x <= genType(0)); + //} + } + + template + GLM_FUNC_QUALIFIER vec roundEven(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'roundEven' only accept floating-point inputs"); + return detail::functor1::call(roundEven, x); + } + + // ceil + using ::std::ceil; + template + GLM_FUNC_QUALIFIER vec ceil(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'ceil' only accept floating-point inputs"); + return detail::compute_ceil::value>::call(x); + } + + // fract + template + GLM_FUNC_QUALIFIER genType fract(genType x) + { + return fract(vec<1, genType>(x)).x; + } + + template + GLM_FUNC_QUALIFIER vec fract(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fract' only accept floating-point inputs"); + return detail::compute_fract::value>::call(x); + } + + // mod + template + GLM_FUNC_QUALIFIER genType mod(genType x, genType y) + { +# if (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) + // Another Cuda compiler bug https://github.com/g-truc/glm/issues/530 + vec<1, genType, defaultp> Result(mod(vec<1, genType, defaultp>(x), y)); + return Result.x; +# else + return mod(vec<1, genType, defaultp>(x), y).x; +# endif + } + + template + GLM_FUNC_QUALIFIER vec mod(vec const& x, T y) + { + return detail::compute_mod::value>::call(x, vec(y)); + } + + template + GLM_FUNC_QUALIFIER vec mod(vec const& x, vec const& y) + { + return detail::compute_mod::value>::call(x, y); + } + + // modf + template + GLM_FUNC_QUALIFIER genType modf(genType x, genType & i) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'modf' only accept floating-point inputs"); + return std::modf(x, &i); + } + + template + GLM_FUNC_QUALIFIER vec<1, T, Q> modf(vec<1, T, Q> const& x, vec<1, T, Q> & i) + { + return vec<1, T, Q>( + modf(x.x, i.x)); + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> modf(vec<2, T, Q> const& x, vec<2, T, Q> & i) + { + return vec<2, T, Q>( + modf(x.x, i.x), + modf(x.y, i.y)); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> modf(vec<3, T, Q> const& x, vec<3, T, Q> & i) + { + return vec<3, T, Q>( + modf(x.x, i.x), + modf(x.y, i.y), + modf(x.z, i.z)); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> modf(vec<4, T, Q> const& x, vec<4, T, Q> & i) + { + return vec<4, T, Q>( + modf(x.x, i.x), + modf(x.y, i.y), + modf(x.z, i.z), + modf(x.w, i.w)); + } + + //// Only valid if (INT_MIN <= x-y <= INT_MAX) + //// min(x,y) + //r = y + ((x - y) & ((x - y) >> (sizeof(int) * + //CHAR_BIT - 1))); + //// max(x,y) + //r = x - ((x - y) & ((x - y) >> (sizeof(int) * + //CHAR_BIT - 1))); + + // min + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& a, T b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'min' 
only accept floating-point or integer inputs"); + return detail::compute_min_vector::value>::call(a, vec(b)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& a, vec const& b) + { + return detail::compute_min_vector::value>::call(a, b); + } + + // max + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& a, T b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'max' only accept floating-point or integer inputs"); + return detail::compute_max_vector::value>::call(a, vec(b)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& a, vec const& b) + { + return detail::compute_max_vector::value>::call(a, b); + } + + // clamp + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); + return min(max(x, minVal), maxVal); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec clamp(vec const& x, T minVal, T maxVal) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); + return detail::compute_clamp_vector::value>::call(x, vec(minVal), vec(maxVal)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec clamp(vec const& x, vec const& minVal, vec const& maxVal) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); + return detail::compute_clamp_vector::value>::call(x, minVal, maxVal); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genTypeT mix(genTypeT x, genTypeT y, genTypeU a) + { + return detail::compute_mix::call(x, y, a); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec mix(vec const& x, vec const& y, U a) + { + return detail::compute_mix_scalar::value>::call(x, y, a); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec mix(vec const& x, vec const& y, vec const& a) + { + return detail::compute_mix_vector::value>::call(x, y, a); + } + + // step + template + GLM_FUNC_QUALIFIER genType step(genType edge, genType x) + { + return mix(static_cast(1), static_cast(0), x < edge); + } + + template + GLM_FUNC_QUALIFIER vec step(T edge, vec const& x) + { + return detail::compute_step_vector::value>::call(vec(edge), x); + } + + template + GLM_FUNC_QUALIFIER vec step(vec const& edge, vec const& x) + { + return detail::compute_step_vector::value>::call(edge, x); + } + + // smoothstep + template + GLM_FUNC_QUALIFIER genType smoothstep(genType edge0, genType edge1, genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'smoothstep' only accept floating-point inputs"); + + genType const tmp(clamp((x - edge0) / (edge1 - edge0), genType(0), genType(1))); + return tmp * tmp * (genType(3) - genType(2) * tmp); + } + + template + GLM_FUNC_QUALIFIER vec smoothstep(T edge0, T edge1, vec const& x) + { + return detail::compute_smoothstep_vector::value>::call(vec(edge0), vec(edge1), x); + } + + template + GLM_FUNC_QUALIFIER vec smoothstep(vec const& edge0, vec const& edge1, vec const& x) + { + return detail::compute_smoothstep_vector::value>::call(edge0, edge1, x); + } + +# if GLM_HAS_CXX11_STL + using std::isnan; +# else + 
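+	// Pre-C++11 fallback: std::isnan overloads are not guaranteed here, so the
+	// dispatch below picks a per-platform equivalent (_isnan on VC and Intel on
+	// Windows, ::isnan on CUDA/HIP and old Android NDKs). A plain `x != x` test
+	// would also detect NaN under IEEE 754, but it can be folded away under
+	// fast-math style optimization, hence the explicit intrinsics.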
template + GLM_FUNC_QUALIFIER bool isnan(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isnan' only accept floating-point inputs"); + +# if GLM_HAS_CXX11_STL + return std::isnan(x); +# elif GLM_COMPILER & GLM_COMPILER_VC + return _isnan(x) != 0; +# elif GLM_COMPILER & GLM_COMPILER_INTEL +# if GLM_PLATFORM & GLM_PLATFORM_WINDOWS + return _isnan(x) != 0; +# else + return ::isnan(x) != 0; +# endif +# elif (GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG)) && (GLM_PLATFORM & GLM_PLATFORM_ANDROID) && __cplusplus < 201103L + return _isnan(x) != 0; +# elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) + return ::isnan(x) != 0; +# else + return std::isnan(x); +# endif + } +# endif + + template + GLM_FUNC_QUALIFIER vec isnan(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isnan' only accept floating-point inputs"); + + vec Result; + for (length_t l = 0; l < v.length(); ++l) + Result[l] = glm::isnan(v[l]); + return Result; + } + +# if GLM_HAS_CXX11_STL + using std::isinf; +# else + template + GLM_FUNC_QUALIFIER bool isinf(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isinf' only accept floating-point inputs"); + +# if GLM_HAS_CXX11_STL + return std::isinf(x); +# elif GLM_COMPILER & (GLM_COMPILER_INTEL | GLM_COMPILER_VC) +# if(GLM_PLATFORM & GLM_PLATFORM_WINDOWS) + return _fpclass(x) == _FPCLASS_NINF || _fpclass(x) == _FPCLASS_PINF; +# else + return ::isinf(x); +# endif +# elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG) +# if(GLM_PLATFORM & GLM_PLATFORM_ANDROID && __cplusplus < 201103L) + return _isinf(x) != 0; +# else + return std::isinf(x); +# endif +# elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) + // http://developer.download.nvidia.com/compute/cuda/4_2/rel/toolkit/docs/online/group__CUDA__MATH__DOUBLE_g13431dd2b40b51f9139cbb7f50c18fab.html#g13431dd2b40b51f9139cbb7f50c18fab + return ::isinf(double(x)) != 0; +# else + return std::isinf(x); +# endif + } +# endif + + template + GLM_FUNC_QUALIFIER vec isinf(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isinf' only accept floating-point inputs"); + + vec Result; + for (length_t l = 0; l < v.length(); ++l) + Result[l] = glm::isinf(v[l]); + return Result; + } + + GLM_FUNC_QUALIFIER int floatBitsToInt(float v) + { + union + { + float in; + int out; + } u; + + u.in = v; + + return u.out; + } + + template + GLM_FUNC_QUALIFIER vec floatBitsToInt(vec const& v) + { + return detail::functor1::call(floatBitsToInt, v); + } + + GLM_FUNC_QUALIFIER uint floatBitsToUint(float v) + { + union + { + float in; + uint out; + } u; + + u.in = v; + + return u.out; + } + + template + GLM_FUNC_QUALIFIER vec floatBitsToUint(vec const& v) + { + return detail::functor1::call(floatBitsToUint, v); + } + + GLM_FUNC_QUALIFIER float intBitsToFloat(int v) + { + union + { + int in; + float out; + } u; + + u.in = v; + + return u.out; + } + + template + GLM_FUNC_QUALIFIER vec intBitsToFloat(vec const& v) + { + return detail::functor1::call(intBitsToFloat, v); + } + + GLM_FUNC_QUALIFIER float uintBitsToFloat(uint v) + { + union + { + uint in; + float out; + } u; + + u.in = v; + + return u.out; + } + + template + GLM_FUNC_QUALIFIER vec uintBitsToFloat(vec const& v) + { + return reinterpret_cast&>(const_cast&>(v)); + } + +# if GLM_HAS_CXX11_STL + using std::fma; +# else + template + 
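+	// Pre-C++11 fallback: computes a * b + c with two roundings rather than the
+	// single correctly-rounded fused operation std::fma guarantees.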
GLM_FUNC_QUALIFIER genType fma(genType const& a, genType const& b, genType const& c) + { + return a * b + c; + } +# endif + + template + GLM_FUNC_QUALIFIER genType frexp(genType x, int& exp) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'frexp' only accept floating-point inputs"); + + return std::frexp(x, &exp); + } + + template + GLM_FUNC_QUALIFIER vec frexp(vec const& v, vec& exp) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'frexp' only accept floating-point inputs"); + + vec Result; + for (length_t l = 0; l < v.length(); ++l) + Result[l] = std::frexp(v[l], &exp[l]); + return Result; + } + + template + GLM_FUNC_QUALIFIER genType ldexp(genType const& x, int const& exp) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'ldexp' only accept floating-point inputs"); + + return std::ldexp(x, exp); + } + + template + GLM_FUNC_QUALIFIER vec ldexp(vec const& v, vec const& exp) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'ldexp' only accept floating-point inputs"); + + vec Result; + for (length_t l = 0; l < v.length(); ++l) + Result[l] = std::ldexp(v[l], exp[l]); + return Result; + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_common_simd.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_common_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_common_simd.inl new file mode 100644 index 000000000000..ce0032d33fef --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_common_simd.inl @@ -0,0 +1,231 @@ +/// @ref core +/// @file glm/detail/func_common_simd.inl + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +#include "../simd/common.h" + +#include + +namespace glm{ +namespace detail +{ + template + struct compute_abs_vector<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> result; + result.data = glm_vec4_abs(v.data); + return result; + } + }; + + template + struct compute_abs_vector<4, int, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v) + { + vec<4, int, Q> result; + result.data = glm_ivec4_abs(v.data); + return result; + } + }; + + template + struct compute_floor<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> result; + result.data = glm_vec4_floor(v.data); + return result; + } + }; + + template + struct compute_ceil<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> result; + result.data = glm_vec4_ceil(v.data); + return result; + } + }; + + template + struct compute_fract<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> result; + result.data = glm_vec4_fract(v.data); + return result; + } + }; + + template + struct compute_round<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> result; + result.data = glm_vec4_round(v.data); + return result; + } + }; + + template + struct compute_mod<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) + { + vec<4, float, Q> result; + result.data = glm_vec4_mod(x.data, y.data); + return result; + } + }; + + template + struct compute_min_vector<4, 
float, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+		{
+			vec<4, float, Q> result;
+			result.data = _mm_min_ps(v1.data, v2.data);
+			return result;
+		}
+	};
+
+	template<qualifier Q>
+	struct compute_min_vector<4, int, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+		{
+			vec<4, int, Q> result;
+			result.data = _mm_min_epi32(v1.data, v2.data);
+			return result;
+		}
+	};
+
+	template<qualifier Q>
+	struct compute_min_vector<4, uint, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2)
+		{
+			vec<4, uint, Q> result;
+			result.data = _mm_min_epu32(v1.data, v2.data);
+			return result;
+		}
+	};
+
+	template<qualifier Q>
+	struct compute_max_vector<4, float, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+		{
+			vec<4, float, Q> result;
+			result.data = _mm_max_ps(v1.data, v2.data);
+			return result;
+		}
+	};
+
+	template<qualifier Q>
+	struct compute_max_vector<4, int, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+		{
+			vec<4, int, Q> result;
+			result.data = _mm_max_epi32(v1.data, v2.data);
+			return result;
+		}
+	};
+
+	template<qualifier Q>
+	struct compute_max_vector<4, uint, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2)
+		{
+			vec<4, uint, Q> result;
+			result.data = _mm_max_epu32(v1.data, v2.data);
+			return result;
+		}
+	};
+
+	template<qualifier Q>
+	struct compute_clamp_vector<4, float, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& minVal, vec<4, float, Q> const& maxVal)
+		{
+			vec<4, float, Q> result;
+			result.data = _mm_min_ps(_mm_max_ps(x.data, minVal.data), maxVal.data);
+			return result;
+		}
+	};
+
+	template<qualifier Q>
+	struct compute_clamp_vector<4, int, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& x, vec<4, int, Q> const& minVal, vec<4, int, Q> const& maxVal)
+		{
+			vec<4, int, Q> result;
+			result.data = _mm_min_epi32(_mm_max_epi32(x.data, minVal.data), maxVal.data);
+			return result;
+		}
+	};
+
+	template<qualifier Q>
+	struct compute_clamp_vector<4, uint, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& x, vec<4, uint, Q> const& minVal, vec<4, uint, Q> const& maxVal)
+		{
+			vec<4, uint, Q> result;
+			result.data = _mm_min_epu32(_mm_max_epu32(x.data, minVal.data), maxVal.data);
+			return result;
+		}
+	};
+
+	template<qualifier Q>
+	struct compute_mix_vector<4, float, bool, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& y, vec<4, bool, Q> const& a)
+		{
+			__m128i const Load = _mm_set_epi32(-static_cast<int>(a.w), -static_cast<int>(a.z), -static_cast<int>(a.y), -static_cast<int>(a.x));
+			__m128 const Mask = _mm_castsi128_ps(Load);
+
+			vec<4, float, Q> Result;
+#			if 0 && GLM_ARCH & GLM_ARCH_AVX
+				Result.data = _mm_blendv_ps(x.data, y.data, Mask);
+#			else
+				Result.data = _mm_or_ps(_mm_and_ps(Mask, y.data), _mm_andnot_ps(Mask, x.data));
+#			endif
+			return Result;
+		}
+	};
+/* FIXME
+	template<qualifier Q>
+	struct compute_step_vector<float, Q, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& edge, vec<4, float, Q> const& x)
+		{
+			vec<4, float, Q> Result;
+			result.data = glm_vec4_step(edge.data, x.data);
+			return result;
+		}
+	};
+*/
+	template<qualifier Q>
+	struct compute_smoothstep_vector<4, float, Q, true>
+	{
+		GLM_FUNC_QUALIFIER
static vec<4, float, Q> call(vec<4, float, Q> const& edge0, vec<4, float, Q> const& edge1, vec<4, float, Q> const& x) + { + vec<4, float, Q> Result; + Result.data = glm_vec4_smoothstep(edge0.data, edge1.data, x.data); + return Result; + } + }; +}//namespace detail +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_exponential.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_exponential.inl new file mode 100644 index 000000000000..2efcdc60adc6 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_exponential.inl @@ -0,0 +1,152 @@ +/// @ref core +/// @file glm/detail/func_exponential.inl + +#include "../vector_relational.hpp" +#include "_vectorize.hpp" +#include +#include +#include + +namespace glm{ +namespace detail +{ +# if GLM_HAS_CXX11_STL + using std::log2; +# else + template + genType log2(genType Value) + { + return std::log(Value) * static_cast(1.4426950408889634073599246810019); + } +# endif + + template + struct compute_log2 + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'log2' only accept floating-point inputs. Include for integer inputs."); + + return detail::functor1::call(log2, v); + } + }; + + template + struct compute_sqrt + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(std::sqrt, x); + } + }; + + template + struct compute_inversesqrt + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return static_cast(1) / sqrt(x); + } + }; + + template + struct compute_inversesqrt + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + vec tmp(x); + vec xhalf(tmp * 0.5f); + vec* p = reinterpret_cast*>(const_cast*>(&x)); + vec i = vec(0x5f375a86) - (*p >> vec(1)); + vec* ptmp = reinterpret_cast*>(&i); + tmp = *ptmp; + tmp = tmp * (1.5f - xhalf * tmp * tmp); + return tmp; + } + }; +}//namespace detail + + // pow + using std::pow; + template + GLM_FUNC_QUALIFIER vec pow(vec const& base, vec const& exponent) + { + return detail::functor2::call(pow, base, exponent); + } + + // exp + using std::exp; + template + GLM_FUNC_QUALIFIER vec exp(vec const& x) + { + return detail::functor1::call(exp, x); + } + + // log + using std::log; + template + GLM_FUNC_QUALIFIER vec log(vec const& x) + { + return detail::functor1::call(log, x); + } + +# if GLM_HAS_CXX11_STL + using std::exp2; +# else + //exp2, ln2 = 0.69314718055994530941723212145818f + template + GLM_FUNC_QUALIFIER genType exp2(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'exp2' only accept floating-point inputs"); + + return std::exp(static_cast(0.69314718055994530941723212145818) * x); + } +# endif + + template + GLM_FUNC_QUALIFIER vec exp2(vec const& x) + { + return detail::functor1::call(exp2, x); + } + + // log2, ln2 = 0.69314718055994530941723212145818f + template + GLM_FUNC_QUALIFIER genType log2(genType x) + { + return log2(vec<1, genType>(x)).x; + } + + template + GLM_FUNC_QUALIFIER vec log2(vec const& x) + { + return detail::compute_log2::is_iec559, detail::is_aligned::value>::call(x); + } + + // sqrt + using std::sqrt; + template + GLM_FUNC_QUALIFIER vec sqrt(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'sqrt' only accept floating-point inputs"); + return detail::compute_sqrt::value>::call(x); + } + + // inversesqrt + template + GLM_FUNC_QUALIFIER genType inversesqrt(genType 
x) + { + return static_cast(1) / sqrt(x); + } + + template + GLM_FUNC_QUALIFIER vec inversesqrt(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'inversesqrt' only accept floating-point inputs"); + return detail::compute_inversesqrt::value>::call(x); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_exponential_simd.inl" +#endif + diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_exponential_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_exponential_simd.inl new file mode 100644 index 000000000000..fb78951727f1 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_exponential_simd.inl @@ -0,0 +1,37 @@ +/// @ref core +/// @file glm/detail/func_exponential_simd.inl + +#include "../simd/exponential.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm{ +namespace detail +{ + template + struct compute_sqrt<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> Result; + Result.data = _mm_sqrt_ps(v.data); + return Result; + } + }; + +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE + template<> + struct compute_sqrt<4, float, aligned_lowp, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, aligned_lowp> call(vec<4, float, aligned_lowp> const& v) + { + vec<4, float, aligned_lowp> Result; + Result.data = glm_vec4_sqrt_lowp(v.data); + return Result; + } + }; +# endif +}//namespace detail +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_geometric.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_geometric.inl new file mode 100644 index 000000000000..404c99056ab4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_geometric.inl @@ -0,0 +1,243 @@ +#include "../exponential.hpp" +#include "../common.hpp" + +namespace glm{ +namespace detail +{ + template + struct compute_length + { + GLM_FUNC_QUALIFIER static T call(vec const& v) + { + return sqrt(dot(v, v)); + } + }; + + template + struct compute_distance + { + GLM_FUNC_QUALIFIER static T call(vec const& p0, vec const& p1) + { + return length(p1 - p0); + } + }; + + template + struct compute_dot{}; + + template + struct compute_dot, T, Aligned> + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<1, T, Q> const& a, vec<1, T, Q> const& b) + { + return a.x * b.x; + } + }; + + template + struct compute_dot, T, Aligned> + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<2, T, Q> const& a, vec<2, T, Q> const& b) + { + vec<2, T, Q> tmp(a * b); + return tmp.x + tmp.y; + } + }; + + template + struct compute_dot, T, Aligned> + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<3, T, Q> const& a, vec<3, T, Q> const& b) + { + vec<3, T, Q> tmp(a * b); + return tmp.x + tmp.y + tmp.z; + } + }; + + template + struct compute_dot, T, Aligned> + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> tmp(a * b); + return (tmp.x + tmp.y) + (tmp.z + tmp.w); + } + }; + + template + struct compute_cross + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, T, Q> call(vec<3, T, Q> const& x, vec<3, T, Q> const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'cross' accepts only floating-point inputs"); + + return vec<3, T, Q>( + x.y * y.z - y.y * x.z, + x.z * y.x - y.z * x.x, + x.x * y.y - y.x * x.y); + } + }; + + template + struct compute_normalize + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) 
+ { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); + + return v * inversesqrt(dot(v, v)); + } + }; + + template + struct compute_faceforward + { + GLM_FUNC_QUALIFIER static vec call(vec const& N, vec const& I, vec const& Nref) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); + + return dot(Nref, I) < static_cast(0) ? N : -N; + } + }; + + template + struct compute_reflect + { + GLM_FUNC_QUALIFIER static vec call(vec const& I, vec const& N) + { + return I - N * dot(N, I) * static_cast(2); + } + }; + + template + struct compute_refract + { + GLM_FUNC_QUALIFIER static vec call(vec const& I, vec const& N, T eta) + { + T const dotValue(dot(N, I)); + T const k(static_cast(1) - eta * eta * (static_cast(1) - dotValue * dotValue)); + vec const Result = + (k >= static_cast(0)) ? (eta * I - (eta * dotValue + std::sqrt(k)) * N) : vec(0); + return Result; + } + }; +}//namespace detail + + // length + template + GLM_FUNC_QUALIFIER genType length(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length' accepts only floating-point inputs"); + + return abs(x); + } + + template + GLM_FUNC_QUALIFIER T length(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length' accepts only floating-point inputs"); + + return detail::compute_length::value>::call(v); + } + + // distance + template + GLM_FUNC_QUALIFIER genType distance(genType const& p0, genType const& p1) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'distance' accepts only floating-point inputs"); + + return length(p1 - p0); + } + + template + GLM_FUNC_QUALIFIER T distance(vec const& p0, vec const& p1) + { + return detail::compute_distance::value>::call(p0, p1); + } + + // dot + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T dot(T x, T y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'dot' accepts only floating-point inputs"); + return x * y; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T dot(vec const& x, vec const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'dot' accepts only floating-point inputs"); + return detail::compute_dot, T, detail::is_aligned::value>::call(x, y); + } + + // cross + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y) + { + return detail::compute_cross::value>::call(x, y); + } +/* + // normalize + template + GLM_FUNC_QUALIFIER genType normalize(genType const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); + + return x < genType(0) ? genType(-1) : genType(1); + } +*/ + template + GLM_FUNC_QUALIFIER vec normalize(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); + + return detail::compute_normalize::value>::call(x); + } + + // faceforward + template + GLM_FUNC_QUALIFIER genType faceforward(genType const& N, genType const& I, genType const& Nref) + { + return dot(Nref, I) < static_cast(0) ? 
N : -N; + } + + template + GLM_FUNC_QUALIFIER vec faceforward(vec const& N, vec const& I, vec const& Nref) + { + return detail::compute_faceforward::value>::call(N, I, Nref); + } + + // reflect + template + GLM_FUNC_QUALIFIER genType reflect(genType const& I, genType const& N) + { + return I - N * dot(N, I) * genType(2); + } + + template + GLM_FUNC_QUALIFIER vec reflect(vec const& I, vec const& N) + { + return detail::compute_reflect::value>::call(I, N); + } + + // refract + template + GLM_FUNC_QUALIFIER genType refract(genType const& I, genType const& N, genType eta) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'refract' accepts only floating-point inputs"); + genType const dotValue(dot(N, I)); + genType const k(static_cast(1) - eta * eta * (static_cast(1) - dotValue * dotValue)); + return (eta * I - (eta * dotValue + sqrt(k)) * N) * static_cast(k >= static_cast(0)); + } + + template + GLM_FUNC_QUALIFIER vec refract(vec const& I, vec const& N, T eta) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'refract' accepts only floating-point inputs"); + return detail::compute_refract::value>::call(I, N, eta); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_geometric_simd.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_geometric_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_geometric_simd.inl new file mode 100644 index 000000000000..2076dae055c3 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_geometric_simd.inl @@ -0,0 +1,163 @@ +/// @ref core +/// @file glm/detail/func_geometric_simd.inl + +#include "../simd/geometric.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm{ +namespace detail +{ + template + struct compute_length<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& v) + { + return _mm_cvtss_f32(glm_vec4_length(v.data)); + } + }; + + template + struct compute_distance<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& p0, vec<4, float, Q> const& p1) + { + return _mm_cvtss_f32(glm_vec4_distance(p0.data, p1.data)); + } + }; + + template + struct compute_dot, float, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) + { + return _mm_cvtss_f32(glm_vec1_dot(x.data, y.data)); + } + }; + + template + struct compute_cross + { + GLM_FUNC_QUALIFIER static vec<3, float, Q> call(vec<3, float, Q> const& a, vec<3, float, Q> const& b) + { + __m128 const set0 = _mm_set_ps(0.0f, a.z, a.y, a.x); + __m128 const set1 = _mm_set_ps(0.0f, b.z, b.y, b.x); + __m128 const xpd0 = glm_vec4_cross(set0, set1); + + vec<4, float, Q> Result; + Result.data = xpd0; + return vec<3, float, Q>(Result); + } + }; + + template + struct compute_normalize<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> Result; + Result.data = glm_vec4_normalize(v.data); + return Result; + } + }; + + template + struct compute_faceforward<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& N, vec<4, float, Q> const& I, vec<4, float, Q> const& Nref) + { + vec<4, float, Q> Result; + Result.data = glm_vec4_faceforward(N.data, I.data, Nref.data); + return Result; + } + }; + + template + struct compute_reflect<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& I, vec<4, float, Q> const& N) + { + vec<4, float, Q> Result; + Result.data = 
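// NOTE: illustrative sketch, not part of the vendored GLM sources.
// The specializations in func_geometric_simd.inl route vec4 float math
// through GLM's glm_vec4_* SSE helpers. The same length computation with
// raw intrinsics, assuming an SSE4.1 target (for _mm_dp_ps):
//
//   #include <smmintrin.h>
//   #include <cstdio>
//
//   int main() {
//       __m128 v = _mm_set_ps(0.0f, 2.0f, 2.0f, 1.0f); // (x=1, y=2, z=2, w=0)
//       __m128 d = _mm_dp_ps(v, v, 0xFF);              // dot(v, v) in all lanes
//       float len = _mm_cvtss_f32(_mm_sqrt_ss(d));     // sqrt of the low lane
//       std::printf("%f\n", len);                      // prints 3.000000
//   }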
glm_vec4_reflect(I.data, N.data); + return Result; + } + }; + + template + struct compute_refract<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& I, vec<4, float, Q> const& N, float eta) + { + vec<4, float, Q> Result; + Result.data = glm_vec4_refract(I.data, N.data, _mm_set1_ps(eta)); + return Result; + } + }; +}//namespace detail +}//namespace glm + +#elif GLM_ARCH & GLM_ARCH_NEON_BIT +namespace glm{ +namespace detail +{ + template + struct compute_length<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& v) + { + return sqrt(compute_dot, float, true>::call(v, v)); + } + }; + + template + struct compute_distance<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& p0, vec<4, float, Q> const& p1) + { + return compute_length<4, float, Q, true>::call(p1 - p0); + } + }; + + + template + struct compute_dot, float, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) + { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + float32x4_t v = vmulq_f32(x.data, y.data); + return vaddvq_f32(v); +#else // Armv7a with Neon + float32x4_t p = vmulq_f32(x.data, y.data); + float32x2_t v = vpadd_f32(vget_low_f32(p), vget_high_f32(p)); + v = vpadd_f32(v, v); + return vget_lane_f32(v, 0); +#endif + } + }; + + template + struct compute_normalize<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + float32x4_t p = vmulq_f32(v.data, v.data); +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + p = vpaddq_f32(p, p); + p = vpaddq_f32(p, p); +#else + float32x2_t t = vpadd_f32(vget_low_f32(p), vget_high_f32(p)); + t = vpadd_f32(t, t); + p = vcombine_f32(t, t); +#endif + + float32x4_t vd = vrsqrteq_f32(p); + vec<4, float, Q> Result; + Result.data = vmulq_f32(v.data, vd); + return Result; + } + }; +}//namespace detail +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_integer.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_integer.inl new file mode 100644 index 000000000000..68b92e99fc61 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_integer.inl @@ -0,0 +1,369 @@ +/// @ref core + +#include "_vectorize.hpp" +#if(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC) +# include +# pragma intrinsic(_BitScanReverse) +#endif//(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC) +#include + +#if !GLM_HAS_EXTENDED_INTEGER_TYPE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic ignored "-Wlong-long" +# endif +# if (GLM_COMPILER & GLM_COMPILER_CLANG) +# pragma clang diagnostic ignored "-Wc++11-long-long" +# endif +#endif + +namespace glm{ +namespace detail +{ + template + GLM_FUNC_QUALIFIER T mask(T Bits) + { + return Bits >= static_cast(sizeof(T) * 8) ? 
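// NOTE: illustrative sketch, not part of the vendored GLM sources.
// detail::mask(Bits) below builds a low-bit mask, saturating to all-ones
// when Bits covers the whole type; the branch guards against the undefined
// behaviour of shifting by >= the bit width. The rule in miniature:
//
//   #include <cstdint>
//   #include <cassert>
//
//   static std::uint32_t mask32(std::uint32_t bits) {
//       return bits >= 32u ? ~0u : (1u << bits) - 1u;
//   }
//
//   int main() {
//       assert(mask32(0)  == 0x00000000u);
//       assert(mask32(8)  == 0x000000FFu);
//       assert(mask32(32) == 0xFFFFFFFFu);
//   }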
~static_cast(0) : (static_cast(1) << Bits) - static_cast(1); + } + + template + struct compute_bitfieldReverseStep + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T, T) + { + return v; + } + }; + + template + struct compute_bitfieldReverseStep + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T Mask, T Shift) + { + return (v & Mask) << Shift | (v & (~Mask)) >> Shift; + } + }; + + template + struct compute_bitfieldBitCountStep + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T, T) + { + return v; + } + }; + + template + struct compute_bitfieldBitCountStep + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T Mask, T Shift) + { + return (v & Mask) + ((v >> Shift) & Mask); + } + }; + + template + struct compute_findLSB + { + GLM_FUNC_QUALIFIER static int call(genIUType Value) + { + if(Value == 0) + return -1; + + return glm::bitCount(~Value & (Value - static_cast(1))); + } + }; + +# if GLM_HAS_BITSCAN_WINDOWS + template + struct compute_findLSB + { + GLM_FUNC_QUALIFIER static int call(genIUType Value) + { + unsigned long Result(0); + unsigned char IsNotNull = _BitScanForward(&Result, *reinterpret_cast(&Value)); + return IsNotNull ? int(Result) : -1; + } + }; + +# if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32)) + template + struct compute_findLSB + { + GLM_FUNC_QUALIFIER static int call(genIUType Value) + { + unsigned long Result(0); + unsigned char IsNotNull = _BitScanForward64(&Result, *reinterpret_cast(&Value)); + return IsNotNull ? int(Result) : -1; + } + }; +# endif +# endif//GLM_HAS_BITSCAN_WINDOWS + + template + struct compute_findMSB_step_vec + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, T Shift) + { + return x | (x >> Shift); + } + }; + + template + struct compute_findMSB_step_vec + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, T) + { + return x; + } + }; + + template + struct compute_findMSB_vec + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + vec x(v); + x = compute_findMSB_step_vec= 8>::call(x, static_cast( 1)); + x = compute_findMSB_step_vec= 8>::call(x, static_cast( 2)); + x = compute_findMSB_step_vec= 8>::call(x, static_cast( 4)); + x = compute_findMSB_step_vec= 16>::call(x, static_cast( 8)); + x = compute_findMSB_step_vec= 32>::call(x, static_cast(16)); + x = compute_findMSB_step_vec= 64>::call(x, static_cast(32)); + return vec(sizeof(T) * 8 - 1) - glm::bitCount(~x); + } + }; + +# if GLM_HAS_BITSCAN_WINDOWS + template + GLM_FUNC_QUALIFIER int compute_findMSB_32(genIUType Value) + { + unsigned long Result(0); + unsigned char IsNotNull = _BitScanReverse(&Result, *reinterpret_cast(&Value)); + return IsNotNull ? int(Result) : -1; + } + + template + struct compute_findMSB_vec + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(compute_findMSB_32, x); + } + }; + +# if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32)) + template + GLM_FUNC_QUALIFIER int compute_findMSB_64(genIUType Value) + { + unsigned long Result(0); + unsigned char IsNotNull = _BitScanReverse64(&Result, *reinterpret_cast(&Value)); + return IsNotNull ? 
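// NOTE: illustrative sketch, not part of the vendored GLM sources.
// compute_findLSB uses the classic identity: ~v & (v - 1) keeps exactly the
// zero bits below the lowest set bit, so a popcount of that value yields the
// index. Standalone check (the popcount builtin is a GCC/Clang assumption):
//
//   #include <cassert>
//
//   static int find_lsb(unsigned v) {
//       if (v == 0) return -1;
//       return __builtin_popcount(~v & (v - 1u));
//   }
//
//   int main() {
//       assert(find_lsb(0x8u) == 3);  // 0b1000 -> bit 3
//       assert(find_lsb(0xAu) == 1);  // 0b1010 -> bit 1
//       assert(find_lsb(0u)   == -1);
//   }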
int(Result) : -1; + } + + template + struct compute_findMSB_vec + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(compute_findMSB_64, x); + } + }; +# endif +# endif//GLM_HAS_BITSCAN_WINDOWS +}//namespace detail + + // uaddCarry + GLM_FUNC_QUALIFIER uint uaddCarry(uint const& x, uint const& y, uint & Carry) + { + detail::uint64 const Value64(static_cast(x) + static_cast(y)); + detail::uint64 const Max32((static_cast(1) << static_cast(32)) - static_cast(1)); + Carry = Value64 > Max32 ? 1u : 0u; + return static_cast(Value64 % (Max32 + static_cast(1))); + } + + template + GLM_FUNC_QUALIFIER vec uaddCarry(vec const& x, vec const& y, vec& Carry) + { + vec Value64(vec(x) + vec(y)); + vec Max32((static_cast(1) << static_cast(32)) - static_cast(1)); + Carry = mix(vec(0), vec(1), greaterThan(Value64, Max32)); + return vec(Value64 % (Max32 + static_cast(1))); + } + + // usubBorrow + GLM_FUNC_QUALIFIER uint usubBorrow(uint const& x, uint const& y, uint & Borrow) + { + Borrow = x >= y ? static_cast(0) : static_cast(1); + return y - x; + } + + template + GLM_FUNC_QUALIFIER vec usubBorrow(vec const& x, vec const& y, vec& Borrow) + { + Borrow = mix(vec(1), vec(0), greaterThanEqual(x, y)); + vec const YgeX(y - x); + vec const XgeY(vec((static_cast(1) << static_cast(32)) + (vec(y) - vec(x)))); + return mix(XgeY, YgeX, greaterThanEqual(y, x)); + } + + // umulExtended + GLM_FUNC_QUALIFIER void umulExtended(uint const& x, uint const& y, uint & msb, uint & lsb) + { + detail::uint64 Value64 = static_cast(x) * static_cast(y); + msb = static_cast(Value64 >> static_cast(32)); + lsb = static_cast(Value64); + } + + template + GLM_FUNC_QUALIFIER void umulExtended(vec const& x, vec const& y, vec& msb, vec& lsb) + { + vec Value64(vec(x) * vec(y)); + msb = vec(Value64 >> static_cast(32)); + lsb = vec(Value64); + } + + // imulExtended + GLM_FUNC_QUALIFIER void imulExtended(int x, int y, int& msb, int& lsb) + { + detail::int64 Value64 = static_cast(x) * static_cast(y); + msb = static_cast(Value64 >> static_cast(32)); + lsb = static_cast(Value64); + } + + template + GLM_FUNC_QUALIFIER void imulExtended(vec const& x, vec const& y, vec& msb, vec& lsb) + { + vec Value64(vec(x) * vec(y)); + lsb = vec(Value64 & static_cast(0xFFFFFFFF)); + msb = vec((Value64 >> static_cast(32)) & static_cast(0xFFFFFFFF)); + } + + // bitfieldExtract + template + GLM_FUNC_QUALIFIER genIUType bitfieldExtract(genIUType Value, int Offset, int Bits) + { + return bitfieldExtract(vec<1, genIUType>(Value), Offset, Bits).x; + } + + template + GLM_FUNC_QUALIFIER vec bitfieldExtract(vec const& Value, int Offset, int Bits) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldExtract' only accept integer inputs"); + + return (Value >> static_cast(Offset)) & static_cast(detail::mask(Bits)); + } + + // bitfieldInsert + template + GLM_FUNC_QUALIFIER genIUType bitfieldInsert(genIUType const& Base, genIUType const& Insert, int Offset, int Bits) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldInsert' only accept integer values"); + + return bitfieldInsert(vec<1, genIUType>(Base), vec<1, genIUType>(Insert), Offset, Bits).x; + } + + template + GLM_FUNC_QUALIFIER vec bitfieldInsert(vec const& Base, vec const& Insert, int Offset, int Bits) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldInsert' only accept integer values"); + + T const Mask = detail::mask(static_cast(Bits)) << Offset; + return (Base & ~Mask) | ((Insert << static_cast(Offset)) & Mask); + } + + // bitfieldReverse 
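// NOTE: illustrative sketch, not part of the vendored GLM sources.
// bitfieldExtract above shifts then masks; bitfieldInsert clears the target
// field and ORs in the shifted insert. Both steps in miniature:
//
//   #include <cassert>
//
//   int main() {
//       unsigned base = 0xAB00u;
//       // extract 8 bits starting at offset 8 -> 0xAB
//       assert(((base >> 8) & 0xFFu) == 0xABu);
//       // insert 0xCD into the same field
//       unsigned mask = 0xFFu << 8;
//       unsigned out  = (base & ~mask) | ((0xCDu << 8) & mask);
//       assert(out == 0xCD00u);
//   }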
+ template + GLM_FUNC_QUALIFIER genIUType bitfieldReverse(genIUType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldReverse' only accept integer values"); + + return bitfieldReverse(glm::vec<1, genIUType, glm::defaultp>(x)).x; + } + + template + GLM_FUNC_QUALIFIER vec bitfieldReverse(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldReverse' only accept integer values"); + + vec x(v); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 2>::call(x, static_cast(0x5555555555555555ull), static_cast( 1)); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 4>::call(x, static_cast(0x3333333333333333ull), static_cast( 2)); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 8>::call(x, static_cast(0x0F0F0F0F0F0F0F0Full), static_cast( 4)); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 16>::call(x, static_cast(0x00FF00FF00FF00FFull), static_cast( 8)); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 32>::call(x, static_cast(0x0000FFFF0000FFFFull), static_cast(16)); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 64>::call(x, static_cast(0x00000000FFFFFFFFull), static_cast(32)); + return x; + } + + // bitCount + template + GLM_FUNC_QUALIFIER int bitCount(genIUType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitCount' only accept integer values"); + + return bitCount(glm::vec<1, genIUType, glm::defaultp>(x)).x; + } + + template + GLM_FUNC_QUALIFIER vec bitCount(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitCount' only accept integer values"); + +# if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable : 4310) //cast truncates constant value +# endif + + vec::type, Q> x(v); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 2>::call(x, typename detail::make_unsigned::type(0x5555555555555555ull), typename detail::make_unsigned::type( 1)); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 4>::call(x, typename detail::make_unsigned::type(0x3333333333333333ull), typename detail::make_unsigned::type( 2)); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 8>::call(x, typename detail::make_unsigned::type(0x0F0F0F0F0F0F0F0Full), typename detail::make_unsigned::type( 4)); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 16>::call(x, typename detail::make_unsigned::type(0x00FF00FF00FF00FFull), typename detail::make_unsigned::type( 8)); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 32>::call(x, typename detail::make_unsigned::type(0x0000FFFF0000FFFFull), typename detail::make_unsigned::type(16)); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 64>::call(x, typename detail::make_unsigned::type(0x00000000FFFFFFFFull), typename detail::make_unsigned::type(32)); + return vec(x); + +# if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif + } + + // findLSB + template + GLM_FUNC_QUALIFIER int findLSB(genIUType Value) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findLSB' only accept integer values"); + + return detail::compute_findLSB::call(Value); + } + + template + GLM_FUNC_QUALIFIER vec findLSB(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findLSB' only accept 
integer values"); + + return detail::functor1::call(findLSB, x); + } + + // findMSB + template + GLM_FUNC_QUALIFIER int findMSB(genIUType v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findMSB' only accept integer values"); + + return findMSB(vec<1, genIUType>(v)).x; + } + + template + GLM_FUNC_QUALIFIER vec findMSB(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findMSB' only accept integer values"); + + return detail::compute_findMSB_vec(sizeof(T) * 8)>::call(v); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_integer_simd.inl" +#endif + diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_integer_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_integer_simd.inl new file mode 100644 index 000000000000..8be6c9ce4dc1 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_integer_simd.inl @@ -0,0 +1,65 @@ +#include "../simd/integer.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm{ +namespace detail +{ + template + struct compute_bitfieldReverseStep<4, uint, Q, true, true> + { + GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v, uint Mask, uint Shift) + { + __m128i const set0 = v.data; + + __m128i const set1 = _mm_set1_epi32(static_cast(Mask)); + __m128i const and1 = _mm_and_si128(set0, set1); + __m128i const sft1 = _mm_slli_epi32(and1, Shift); + + __m128i const set2 = _mm_andnot_si128(set0, _mm_set1_epi32(-1)); + __m128i const and2 = _mm_and_si128(set0, set2); + __m128i const sft2 = _mm_srai_epi32(and2, Shift); + + __m128i const or0 = _mm_or_si128(sft1, sft2); + + return or0; + } + }; + + template + struct compute_bitfieldBitCountStep<4, uint, Q, true, true> + { + GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v, uint Mask, uint Shift) + { + __m128i const set0 = v.data; + + __m128i const set1 = _mm_set1_epi32(static_cast(Mask)); + __m128i const and0 = _mm_and_si128(set0, set1); + __m128i const sft0 = _mm_slli_epi32(set0, Shift); + __m128i const and1 = _mm_and_si128(sft0, set1); + __m128i const add0 = _mm_add_epi32(and0, and1); + + return add0; + } + }; +}//namespace detail + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template<> + GLM_FUNC_QUALIFIER int bitCount(uint x) + { + return _mm_popcnt_u32(x); + } + +# if(GLM_MODEL == GLM_MODEL_64) + template<> + GLM_FUNC_QUALIFIER int bitCount(detail::uint64 x) + { + return static_cast(_mm_popcnt_u64(x)); + } +# endif//GLM_MODEL +# endif//GLM_ARCH + +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_matrix.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_matrix.inl new file mode 100644 index 000000000000..c2d568ff471d --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_matrix.inl @@ -0,0 +1,443 @@ +#include "../geometric.hpp" +#include + +namespace glm{ +namespace detail +{ + template + struct compute_matrixCompMult + { + GLM_FUNC_QUALIFIER static mat call(mat const& x, mat const& y) + { + mat Result; + for(length_t i = 0; i < Result.length(); ++i) + Result[i] = x[i] * y[i]; + return Result; + } + }; + + template + struct compute_matrixCompMult_type { + GLM_FUNC_QUALIFIER static mat call(mat const& x, mat const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, + "'matrixCompMult' only accept floating-point inputs, include to discard this restriction."); + return detail::compute_matrixCompMult::value>::call(x, y); + } + }; + + template + struct 
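// NOTE: illustrative sketch, not part of the vendored GLM sources.
// The bitCount ladder above is the standard SWAR popcount: pair sums at
// widths 2, 4, 8, ... using the 0x5555/0x3333/0x0F0F masks (bitfieldReverse
// walks the same ladder, swapping halves instead of summing). A 32-bit
// version of the summing ladder:
//
//   #include <cstdint>
//   #include <cassert>
//
//   static std::uint32_t popcount32(std::uint32_t x) {
//       x = (x & 0x55555555u) + ((x >> 1) & 0x55555555u);
//       x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
//       x = (x & 0x0F0F0F0Fu) + ((x >> 4) & 0x0F0F0F0Fu);
//       x = (x & 0x00FF00FFu) + ((x >> 8) & 0x00FF00FFu);
//       return (x & 0x0000FFFFu) + (x >> 16);
//   }
//
//   int main() {
//       assert(popcount32(0xFFFFFFFFu) == 32u);
//       assert(popcount32(0x0F0F0001u) == 9u);
//   }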
compute_outerProduct { + GLM_FUNC_QUALIFIER static typename detail::outerProduct_trait::type call(vec const& c, vec const& r) + { + typename detail::outerProduct_trait::type m; + for(length_t i = 0; i < m.length(); ++i) + m[i] = c * r[i]; + return m; + } + }; + + template + struct compute_outerProduct_type { + GLM_FUNC_QUALIFIER static typename detail::outerProduct_trait::type call(vec const& c, vec const& r) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, + "'outerProduct' only accept floating-point inputs, include to discard this restriction."); + + return detail::compute_outerProduct::call(c, r); + } + }; + + template + struct compute_transpose{}; + + template + struct compute_transpose<2, 2, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<2, 2, T, Q> call(mat<2, 2, T, Q> const& m) + { + mat<2, 2, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + return Result; + } + }; + + template + struct compute_transpose<2, 3, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<3, 2, T, Q> call(mat<2, 3, T, Q> const& m) + { + mat<3,2, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + return Result; + } + }; + + template + struct compute_transpose<2, 4, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<4, 2, T, Q> call(mat<2, 4, T, Q> const& m) + { + mat<4, 2, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[3][0] = m[0][3]; + Result[3][1] = m[1][3]; + return Result; + } + }; + + template + struct compute_transpose<3, 2, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<2, 3, T, Q> call(mat<3, 2, T, Q> const& m) + { + mat<2, 3, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + return Result; + } + }; + + template + struct compute_transpose<3, 3, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m) + { + mat<3, 3, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + return Result; + } + }; + + template + struct compute_transpose<3, 4, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<4, 3, T, Q> call(mat<3, 4, T, Q> const& m) + { + mat<4, 3, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + Result[3][0] = m[0][3]; + Result[3][1] = m[1][3]; + Result[3][2] = m[2][3]; + return Result; + } + }; + + template + struct compute_transpose<4, 2, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<2, 4, T, Q> call(mat<4, 2, T, Q> const& m) + { + mat<2, 4, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[0][3] = m[3][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[1][3] = m[3][1]; + return Result; + } + }; + + template + struct compute_transpose<4, 3, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<3, 
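// NOTE: illustrative sketch, not part of the vendored GLM sources.
// Each compute_transpose specialization below just swaps indices,
// Result[i][j] = m[j][i], with the result dimensions flipped for
// non-square matrices. A hedged check via the public API:
//
//   #include <glm/glm.hpp>
//   #include <cassert>
//
//   int main() {
//       glm::mat2x3 m(1.0f, 2.0f, 3.0f,   // column 0
//                     4.0f, 5.0f, 6.0f);  // column 1
//       glm::mat3x2 t = glm::transpose(m);
//       assert(t[0] == glm::vec2(1.0f, 4.0f));
//       assert(t[2] == glm::vec2(3.0f, 6.0f));
//   }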
4, T, Q> call(mat<4, 3, T, Q> const& m) + { + mat<3, 4, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[0][3] = m[3][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[1][3] = m[3][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + Result[2][3] = m[3][2]; + return Result; + } + }; + + template + struct compute_transpose<4, 4, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m) + { + mat<4, 4, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[0][3] = m[3][0]; + + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[1][3] = m[3][1]; + + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + Result[2][3] = m[3][2]; + + Result[3][0] = m[0][3]; + Result[3][1] = m[1][3]; + Result[3][2] = m[2][3]; + Result[3][3] = m[3][3]; + return Result; + } + }; + + template + struct compute_transpose_type { + GLM_FUNC_QUALIFIER static mat call(mat const& m) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, + "'transpose' only accept floating-point inputs, include to discard this restriction."); + return detail::compute_transpose::value>::call(m); + } + }; + + template + struct compute_determinant{}; + + template + struct compute_determinant<2, 2, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static T call(mat<2, 2, T, Q> const& m) + { + return m[0][0] * m[1][1] - m[1][0] * m[0][1]; + } + }; + + template + struct compute_determinant<3, 3, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static T call(mat<3, 3, T, Q> const& m) + { + return + + m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2]) + - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2]) + + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2]); + } + }; + + template + struct compute_determinant<4, 4, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static T call(mat<4, 4, T, Q> const& m) + { + T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + + vec<4, T, Q> DetCof( + + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), + - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), + + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), + - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); + + return + m[0][0] * DetCof[0] + m[0][1] * DetCof[1] + + m[0][2] * DetCof[2] + m[0][3] * DetCof[3]; + } + }; + + template + struct compute_determinant_type{ + + GLM_FUNC_QUALIFIER static T call(mat const& m) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, + "'determinant' only accept floating-point inputs, include to discard this restriction."); + return detail::compute_determinant::value>::call(m); + } + }; + + template + struct compute_inverse{}; + + template + struct compute_inverse<2, 2, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<2, 2, T, Q> call(mat<2, 2, T, Q> const& m) + { + T OneOverDeterminant = static_cast(1) / ( + + m[0][0] * m[1][1] + - m[1][0] * m[0][1]); + + mat<2, 2, T, Q> Inverse( + + m[1][1] * OneOverDeterminant, + - m[0][1] * OneOverDeterminant, + - 
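// NOTE: illustrative sketch, not part of the vendored GLM sources.
// compute_determinant expands along the first column (3x3) or builds a row
// of cofactors from 2x2 sub-determinants (4x4). A hedged check on a
// diagonal matrix, whose determinant is the product of the diagonal:
//
//   #include <glm/glm.hpp>
//   #include <cassert>
//
//   int main() {
//       // column-major constructor: columns are (2,0,0), (0,3,0), (0,0,4)
//       glm::mat3 m(2.0f, 0.0f, 0.0f,
//                   0.0f, 3.0f, 0.0f,
//                   0.0f, 0.0f, 4.0f);
//       assert(glm::determinant(m) == 24.0f);
//   }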
m[1][0] * OneOverDeterminant, + + m[0][0] * OneOverDeterminant); + + return Inverse; + } + }; + + template + struct compute_inverse<3, 3, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m) + { + T OneOverDeterminant = static_cast(1) / ( + + m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2]) + - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2]) + + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2])); + + mat<3, 3, T, Q> Inverse; + Inverse[0][0] = + (m[1][1] * m[2][2] - m[2][1] * m[1][2]) * OneOverDeterminant; + Inverse[1][0] = - (m[1][0] * m[2][2] - m[2][0] * m[1][2]) * OneOverDeterminant; + Inverse[2][0] = + (m[1][0] * m[2][1] - m[2][0] * m[1][1]) * OneOverDeterminant; + Inverse[0][1] = - (m[0][1] * m[2][2] - m[2][1] * m[0][2]) * OneOverDeterminant; + Inverse[1][1] = + (m[0][0] * m[2][2] - m[2][0] * m[0][2]) * OneOverDeterminant; + Inverse[2][1] = - (m[0][0] * m[2][1] - m[2][0] * m[0][1]) * OneOverDeterminant; + Inverse[0][2] = + (m[0][1] * m[1][2] - m[1][1] * m[0][2]) * OneOverDeterminant; + Inverse[1][2] = - (m[0][0] * m[1][2] - m[1][0] * m[0][2]) * OneOverDeterminant; + Inverse[2][2] = + (m[0][0] * m[1][1] - m[1][0] * m[0][1]) * OneOverDeterminant; + + return Inverse; + } + }; + + template + struct compute_inverse<4, 4, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m) + { + T Coef00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + T Coef02 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + T Coef03 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + T Coef04 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + T Coef06 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + T Coef07 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + T Coef08 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + T Coef10 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + T Coef11 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + T Coef12 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + T Coef14 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + T Coef15 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + T Coef16 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + T Coef18 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + T Coef19 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + T Coef20 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + T Coef22 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + T Coef23 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + vec<4, T, Q> Fac0(Coef00, Coef00, Coef02, Coef03); + vec<4, T, Q> Fac1(Coef04, Coef04, Coef06, Coef07); + vec<4, T, Q> Fac2(Coef08, Coef08, Coef10, Coef11); + vec<4, T, Q> Fac3(Coef12, Coef12, Coef14, Coef15); + vec<4, T, Q> Fac4(Coef16, Coef16, Coef18, Coef19); + vec<4, T, Q> Fac5(Coef20, Coef20, Coef22, Coef23); + + vec<4, T, Q> Vec0(m[1][0], m[0][0], m[0][0], m[0][0]); + vec<4, T, Q> Vec1(m[1][1], m[0][1], m[0][1], m[0][1]); + vec<4, T, Q> Vec2(m[1][2], m[0][2], m[0][2], m[0][2]); + vec<4, T, Q> Vec3(m[1][3], m[0][3], m[0][3], m[0][3]); + + vec<4, T, Q> Inv0(Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2); + vec<4, T, Q> Inv1(Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4); + vec<4, T, Q> Inv2(Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5); + vec<4, T, Q> Inv3(Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5); + + vec<4, T, Q> SignA(+1, -1, +1, -1); + vec<4, T, Q> SignB(-1, +1, -1, +1); + mat<4, 4, T, Q> Inverse(Inv0 * SignA, Inv1 * SignB, Inv2 * SignA, Inv3 * SignB); + + vec<4, T, Q> Row0(Inverse[0][0], Inverse[1][0], Inverse[2][0], Inverse[3][0]); + + vec<4, T, Q> Dot0(m[0] * Row0); + T Dot1 = (Dot0.x + Dot0.y) + (Dot0.z + Dot0.w); + + T OneOverDeterminant = static_cast(1) / Dot1; + + return Inverse * OneOverDeterminant; + } + 
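// NOTE: illustrative sketch, not part of the vendored GLM sources.
// The 2x2 and 3x3 inverses above are adjugate / determinant; the 4x4 path
// vectorizes the same cofactor construction (Fac0..Fac5) and recovers the
// determinant from one row at the end. A hedged round-trip check:
//
//   #include <glm/glm.hpp>
//   #include <glm/gtc/epsilon.hpp>
//   #include <cassert>
//
//   int main() {
//       glm::mat2 m(4.0f, 7.0f,    // first column
//                   2.0f, 6.0f);   // second column, det = 24 - 14 = 10
//       glm::mat2 id = m * glm::inverse(m);
//       assert(glm::all(glm::epsilonEqual(id[0], glm::vec2(1, 0), 1e-5f)));
//       assert(glm::all(glm::epsilonEqual(id[1], glm::vec2(0, 1), 1e-5f)));
//   }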
}; +}//namespace detail + + template + GLM_FUNC_QUALIFIER mat matrixCompMult(mat const& x, mat const& y) + { + return detail::compute_matrixCompMult_type::is_iec559, detail::is_aligned::value>::call(x, y); + } + + template + GLM_FUNC_QUALIFIER typename detail::outerProduct_trait::type outerProduct(vec const& c, vec const& r) + { + return detail::compute_outerProduct_type::is_iec559>::call(c, r); + } + + template + GLM_FUNC_QUALIFIER typename mat::transpose_type transpose(mat const& m) + { + return detail::compute_transpose_type::is_iec559, detail::is_aligned::value>::call(m); + } + + template + GLM_FUNC_QUALIFIER T determinant(mat const& m) + { + return detail::compute_determinant_type::is_iec559, detail::is_aligned::value>::call(m); + } + + template + GLM_FUNC_QUALIFIER mat inverse(mat const& m) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'inverse' only accept floating-point inputs"); + return detail::compute_inverse::value>::call(m); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_matrix_simd.inl" +#endif + diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_matrix_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_matrix_simd.inl new file mode 100644 index 000000000000..b9bb4615de67 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_matrix_simd.inl @@ -0,0 +1,252 @@ +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +#include "type_mat4x4.hpp" +#include "../geometric.hpp" +#include "../simd/matrix.h" +#include + +namespace glm{ +namespace detail +{ +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE + template + struct compute_matrixCompMult<4, 4, float, Q, true> + { + GLM_STATIC_ASSERT(detail::is_aligned::value, "Specialization requires aligned"); + + GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& x, mat<4, 4, float, Q> const& y) + { + mat<4, 4, float, Q> Result; + glm_mat4_matrixCompMult( + &x[0].data, + &y[0].data, + &Result[0].data); + return Result; + } + }; +# endif + + template + struct compute_transpose<4, 4, float, Q, true> + { + GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) + { + mat<4, 4, float, Q> Result; + glm_mat4_transpose(&m[0].data, &Result[0].data); + return Result; + } + }; + + template + struct compute_determinant<4, 4, float, Q, true> + { + GLM_FUNC_QUALIFIER static float call(mat<4, 4, float, Q> const& m) + { + return _mm_cvtss_f32(glm_mat4_determinant(&m[0].data)); + } + }; + + template + struct compute_inverse<4, 4, float, Q, true> + { + GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) + { + mat<4, 4, float, Q> Result; + glm_mat4_inverse(&m[0].data, &Result[0].data); + return Result; + } + }; +}//namespace detail + +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE + template<> + GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_lowp> outerProduct<4, 4, float, aligned_lowp>(vec<4, float, aligned_lowp> const& c, vec<4, float, aligned_lowp> const& r) + { + __m128 NativeResult[4]; + glm_mat4_outerProduct(c.data, r.data, NativeResult); + mat<4, 4, float, aligned_lowp> Result; + std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); + return Result; + } + + template<> + GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_mediump> outerProduct<4, 4, float, aligned_mediump>(vec<4, float, aligned_mediump> const& c, vec<4, float, aligned_mediump> const& r) + { + __m128 NativeResult[4]; + glm_mat4_outerProduct(c.data, r.data, NativeResult); + mat<4, 4, float, aligned_mediump> Result; + 
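// NOTE: illustrative sketch, not part of the vendored GLM sources.
// outerProduct(c, r) builds column i as c * r[i], so the result has
// c.length() rows and r.length() columns. A hedged check:
//
//   #include <glm/glm.hpp>
//   #include <cassert>
//
//   int main() {
//       glm::vec2 c(1.0f, 2.0f);
//       glm::vec2 r(3.0f, 4.0f);
//       glm::mat2 m = glm::outerProduct(c, r); // m[i] = c * r[i]
//       assert(m[0] == glm::vec2(3.0f, 6.0f));
//       assert(m[1] == glm::vec2(4.0f, 8.0f));
//   }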
std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); + return Result; + } + + template<> + GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_highp> outerProduct<4, 4, float, aligned_highp>(vec<4, float, aligned_highp> const& c, vec<4, float, aligned_highp> const& r) + { + __m128 NativeResult[4]; + glm_mat4_outerProduct(c.data, r.data, NativeResult); + mat<4, 4, float, aligned_highp> Result; + std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); + return Result; + } +# endif +}//namespace glm + +#elif GLM_ARCH & GLM_ARCH_NEON_BIT + +namespace glm { +#if GLM_LANG & GLM_LANG_CXX11_FLAG + template + GLM_FUNC_QUALIFIER + typename std::enable_if::value, mat<4, 4, float, Q>>::type + operator*(mat<4, 4, float, Q> const & m1, mat<4, 4, float, Q> const & m2) + { + auto MulRow = [&](int l) { + float32x4_t const SrcA = m2[l].data; + + float32x4_t r = neon::mul_lane(m1[0].data, SrcA, 0); + r = neon::madd_lane(r, m1[1].data, SrcA, 1); + r = neon::madd_lane(r, m1[2].data, SrcA, 2); + r = neon::madd_lane(r, m1[3].data, SrcA, 3); + + return r; + }; + + mat<4, 4, float, aligned_highp> Result; + Result[0].data = MulRow(0); + Result[1].data = MulRow(1); + Result[2].data = MulRow(2); + Result[3].data = MulRow(3); + + return Result; + } +#endif // CXX11 + +namespace detail +{ + template + struct compute_inverse<4, 4, float, Q, true> + { + GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) + { + float32x4_t const& m0 = m[0].data; + float32x4_t const& m1 = m[1].data; + float32x4_t const& m2 = m[2].data; + float32x4_t const& m3 = m[3].data; + + // m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // m[1][2] * m[3][3] - m[3][2] * m[1][3]; + // m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + float32x4_t Fac0; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); + Fac0 = w0 * w1 - w2 * w3; + } + + // m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // m[1][1] * m[3][3] - m[3][1] * m[1][3]; + // m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + float32x4_t Fac1; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); + Fac1 = w0 * w1 - w2 * w3; + } + + // m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // m[1][1] * m[3][2] - m[3][1] * m[1][2]; + // m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + float32x4_t Fac2; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); + Fac2 = w0 * w1 - w2 * w3; + } + + // m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // m[1][0] * m[3][3] - m[3][0] * m[1][3]; + // m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + float32x4_t Fac3; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); + float32x4_t w2 = 
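// NOTE: illustrative sketch, not part of the vendored GLM sources.
// The NEON operator* below computes each result column as a linear
// combination of m1's columns weighted by one column of m2, using
// multiply-by-lane and fused multiply-add. The same column-major
// recurrence in scalar form:
//
//   #include <glm/glm.hpp>
//
//   // result[c] = m1[0]*m2[c][0] + m1[1]*m2[c][1]
//   //           + m1[2]*m2[c][2] + m1[3]*m2[c][3]
//   glm::vec4 mul_column(glm::mat4 const& m1, glm::mat4 const& m2, int c) {
//       glm::vec4 r = m1[0] * m2[c][0];
//       r += m1[1] * m2[c][1];
//       r += m1[2] * m2[c][2];
//       r += m1[3] * m2[c][3];
//       return r;
//   }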
neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); + Fac3 = w0 * w1 - w2 * w3; + } + + // m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // m[1][0] * m[3][2] - m[3][0] * m[1][2]; + // m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + float32x4_t Fac4; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); + Fac4 = w0 * w1 - w2 * w3; + } + + // m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // m[1][0] * m[3][1] - m[3][0] * m[1][1]; + // m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + float32x4_t Fac5; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); + Fac5 = w0 * w1 - w2 * w3; + } + + float32x4_t Vec0 = neon::copy_lane(neon::dupq_lane(m0, 0), 0, m1, 0); // (m[1][0], m[0][0], m[0][0], m[0][0]); + float32x4_t Vec1 = neon::copy_lane(neon::dupq_lane(m0, 1), 0, m1, 1); // (m[1][1], m[0][1], m[0][1], m[0][1]); + float32x4_t Vec2 = neon::copy_lane(neon::dupq_lane(m0, 2), 0, m1, 2); // (m[1][2], m[0][2], m[0][2], m[0][2]); + float32x4_t Vec3 = neon::copy_lane(neon::dupq_lane(m0, 3), 0, m1, 3); // (m[1][3], m[0][3], m[0][3], m[0][3]); + + float32x4_t Inv0 = Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2; + float32x4_t Inv1 = Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4; + float32x4_t Inv2 = Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5; + float32x4_t Inv3 = Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5; + + float32x4_t r0 = float32x4_t{-1, +1, -1, +1} * Inv0; + float32x4_t r1 = float32x4_t{+1, -1, +1, -1} * Inv1; + float32x4_t r2 = float32x4_t{-1, +1, -1, +1} * Inv2; + float32x4_t r3 = float32x4_t{+1, -1, +1, -1} * Inv3; + + float32x4_t det = neon::mul_lane(r0, m0, 0); + det = neon::madd_lane(det, r1, m0, 1); + det = neon::madd_lane(det, r2, m0, 2); + det = neon::madd_lane(det, r3, m0, 3); + + float32x4_t rdet = vdupq_n_f32(1 / vgetq_lane_f32(det, 0)); + + mat<4, 4, float, Q> r; + r[0].data = vmulq_f32(r0, rdet); + r[1].data = vmulq_f32(r1, rdet); + r[2].data = vmulq_f32(r2, rdet); + r[3].data = vmulq_f32(r3, rdet); + return r; + } + }; +}//namespace detail +}//namespace glm +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_packing.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_packing.inl new file mode 100644 index 000000000000..234b093c081c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_packing.inl @@ -0,0 +1,189 @@ +/// @ref core +/// @file glm/detail/func_packing.inl + +#include "../common.hpp" +#include "type_half.hpp" + +namespace glm +{ + GLM_FUNC_QUALIFIER uint packUnorm2x16(vec2 const& v) + { + union + { + unsigned short in[2]; + uint out; + } u; + + vec<2, unsigned short, defaultp> result(round(clamp(v, 0.0f, 1.0f) * 65535.0f)); + + u.in[0] = result[0]; + u.in[1] = result[1]; + + return u.out; + } + + GLM_FUNC_QUALIFIER vec2 unpackUnorm2x16(uint p) + { + union + { + uint in; + unsigned short out[2]; + } u; + + u.in = p; + + return vec2(u.out[0], u.out[1]) * 1.5259021896696421759365224689097e-5f; + } + + GLM_FUNC_QUALIFIER uint 
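// NOTE: illustrative sketch, not part of the vendored GLM sources.
// packUnorm2x16 below quantizes each component to 16 bits and fuses them
// through a union. Union type punning is well-defined in C but only
// conditionally supported in C++, which is why some codebases use memcpy
// for the same reinterpretation; an equivalent sketch:
//
//   #include <cstdint>
//   #include <cstring>
//
//   static std::uint32_t pack2x16(std::uint16_t lo, std::uint16_t hi) {
//       std::uint16_t in[2] = { lo, hi };
//       std::uint32_t out;
//       std::memcpy(&out, in, sizeof out); // defined-behaviour punning
//       return out;
//   }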
packSnorm2x16(vec2 const& v) + { + union + { + signed short in[2]; + uint out; + } u; + + vec<2, short, defaultp> result(round(clamp(v, -1.0f, 1.0f) * 32767.0f)); + + u.in[0] = result[0]; + u.in[1] = result[1]; + + return u.out; + } + + GLM_FUNC_QUALIFIER vec2 unpackSnorm2x16(uint p) + { + union + { + uint in; + signed short out[2]; + } u; + + u.in = p; + + return clamp(vec2(u.out[0], u.out[1]) * 3.0518509475997192297128208258309e-5f, -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint packUnorm4x8(vec4 const& v) + { + union + { + unsigned char in[4]; + uint out; + } u; + + vec<4, unsigned char, defaultp> result(round(clamp(v, 0.0f, 1.0f) * 255.0f)); + + u.in[0] = result[0]; + u.in[1] = result[1]; + u.in[2] = result[2]; + u.in[3] = result[3]; + + return u.out; + } + + GLM_FUNC_QUALIFIER vec4 unpackUnorm4x8(uint p) + { + union + { + uint in; + unsigned char out[4]; + } u; + + u.in = p; + + return vec4(u.out[0], u.out[1], u.out[2], u.out[3]) * 0.0039215686274509803921568627451f; + } + + GLM_FUNC_QUALIFIER uint packSnorm4x8(vec4 const& v) + { + union + { + signed char in[4]; + uint out; + } u; + + vec<4, signed char, defaultp> result(round(clamp(v, -1.0f, 1.0f) * 127.0f)); + + u.in[0] = result[0]; + u.in[1] = result[1]; + u.in[2] = result[2]; + u.in[3] = result[3]; + + return u.out; + } + + GLM_FUNC_QUALIFIER glm::vec4 unpackSnorm4x8(uint p) + { + union + { + uint in; + signed char out[4]; + } u; + + u.in = p; + + return clamp(vec4(u.out[0], u.out[1], u.out[2], u.out[3]) * 0.0078740157480315f, -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER double packDouble2x32(uvec2 const& v) + { + union + { + uint in[2]; + double out; + } u; + + u.in[0] = v[0]; + u.in[1] = v[1]; + + return u.out; + } + + GLM_FUNC_QUALIFIER uvec2 unpackDouble2x32(double v) + { + union + { + double in; + uint out[2]; + } u; + + u.in = v; + + return uvec2(u.out[0], u.out[1]); + } + + GLM_FUNC_QUALIFIER uint packHalf2x16(vec2 const& v) + { + union + { + signed short in[2]; + uint out; + } u; + + u.in[0] = detail::toFloat16(v.x); + u.in[1] = detail::toFloat16(v.y); + + return u.out; + } + + GLM_FUNC_QUALIFIER vec2 unpackHalf2x16(uint v) + { + union + { + uint in; + signed short out[2]; + } u; + + u.in = v; + + return vec2( + detail::toFloat32(u.out[0]), + detail::toFloat32(u.out[1])); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_packing_simd.inl" +#endif + diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_packing_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_packing_simd.inl new file mode 100644 index 000000000000..fd0fe8b7d9b4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_packing_simd.inl @@ -0,0 +1,6 @@ +namespace glm{ +namespace detail +{ + +}//namespace detail +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_trigonometric.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_trigonometric.inl new file mode 100644 index 000000000000..9e6d9cfb1ce7 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_trigonometric.inl @@ -0,0 +1,197 @@ +#include "_vectorize.hpp" +#include +#include + +namespace glm +{ + // radians + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType radians(genType degrees) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'radians' only accept floating-point input"); + + return degrees * static_cast(0.01745329251994329576923690768489); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec radians(vec const& v) + { + return 
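// NOTE: illustrative sketch, not part of the vendored GLM sources.
// The pack*/unpack* pairs above quantize to integers on the way in and
// rescale (with a clamp for the signed variants) on the way out. A hedged
// round-trip check at the endpoints:
//
//   #include <glm/glm.hpp>
//   #include <glm/gtc/epsilon.hpp>
//   #include <cassert>
//
//   int main() {
//       glm::uint p = glm::packUnorm2x16(glm::vec2(0.0f, 1.0f));
//       glm::vec2 v = glm::unpackUnorm2x16(p);
//       assert(glm::all(glm::epsilonEqual(v, glm::vec2(0.0f, 1.0f), 1e-4f)));
//   }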
detail::functor1::call(radians, v); + } + + // degrees + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType degrees(genType radians) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'degrees' only accept floating-point input"); + + return radians * static_cast(57.295779513082320876798154814105); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec degrees(vec const& v) + { + return detail::functor1::call(degrees, v); + } + + // sin + using ::std::sin; + + template + GLM_FUNC_QUALIFIER vec sin(vec const& v) + { + return detail::functor1::call(sin, v); + } + + // cos + using std::cos; + + template + GLM_FUNC_QUALIFIER vec cos(vec const& v) + { + return detail::functor1::call(cos, v); + } + + // tan + using std::tan; + + template + GLM_FUNC_QUALIFIER vec tan(vec const& v) + { + return detail::functor1::call(tan, v); + } + + // asin + using std::asin; + + template + GLM_FUNC_QUALIFIER vec asin(vec const& v) + { + return detail::functor1::call(asin, v); + } + + // acos + using std::acos; + + template + GLM_FUNC_QUALIFIER vec acos(vec const& v) + { + return detail::functor1::call(acos, v); + } + + // atan + template + GLM_FUNC_QUALIFIER genType atan(genType y, genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'atan' only accept floating-point input"); + + return ::std::atan2(y, x); + } + + template + GLM_FUNC_QUALIFIER vec atan(vec const& y, vec const& x) + { + return detail::functor2::call(::std::atan2, y, x); + } + + using std::atan; + + template + GLM_FUNC_QUALIFIER vec atan(vec const& v) + { + return detail::functor1::call(atan, v); + } + + // sinh + using std::sinh; + + template + GLM_FUNC_QUALIFIER vec sinh(vec const& v) + { + return detail::functor1::call(sinh, v); + } + + // cosh + using std::cosh; + + template + GLM_FUNC_QUALIFIER vec cosh(vec const& v) + { + return detail::functor1::call(cosh, v); + } + + // tanh + using std::tanh; + + template + GLM_FUNC_QUALIFIER vec tanh(vec const& v) + { + return detail::functor1::call(tanh, v); + } + + // asinh +# if GLM_HAS_CXX11_STL + using std::asinh; +# else + template + GLM_FUNC_QUALIFIER genType asinh(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'asinh' only accept floating-point input"); + + return (x < static_cast(0) ? static_cast(-1) : (x > static_cast(0) ? 
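// NOTE: illustrative sketch, not part of the vendored GLM sources.
// The pre-C++11 asinh fallback below uses the identity
// asinh(x) = sign(x) * log(|x| + sqrt(1 + x^2)). A hedged check against
// the standard library for a positive argument:
//
//   #include <cmath>
//   #include <cassert>
//
//   int main() {
//       double x = 1.5;
//       double viaLog = std::log(x + std::sqrt(1.0 + x * x));
//       assert(std::abs(viaLog - std::asinh(x)) < 1e-12);
//   }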
static_cast(1) : static_cast(0))) * log(std::abs(x) + sqrt(static_cast(1) + x * x)); + } +# endif + + template + GLM_FUNC_QUALIFIER vec asinh(vec const& v) + { + return detail::functor1::call(asinh, v); + } + + // acosh +# if GLM_HAS_CXX11_STL + using std::acosh; +# else + template + GLM_FUNC_QUALIFIER genType acosh(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acosh' only accept floating-point input"); + + if(x < static_cast(1)) + return static_cast(0); + return log(x + sqrt(x * x - static_cast(1))); + } +# endif + + template + GLM_FUNC_QUALIFIER vec acosh(vec const& v) + { + return detail::functor1::call(acosh, v); + } + + // atanh +# if GLM_HAS_CXX11_STL + using std::atanh; +# else + template + GLM_FUNC_QUALIFIER genType atanh(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'atanh' only accept floating-point input"); + + if(std::abs(x) >= static_cast(1)) + return 0; + return static_cast(0.5) * log((static_cast(1) + x) / (static_cast(1) - x)); + } +# endif + + template + GLM_FUNC_QUALIFIER vec atanh(vec const& v) + { + return detail::functor1::call(atanh, v); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_trigonometric_simd.inl" +#endif + diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_trigonometric_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_trigonometric_simd.inl new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_vector_relational.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_vector_relational.inl new file mode 100644 index 000000000000..80c9e87fcb97 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_vector_relational.inl @@ -0,0 +1,87 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec lessThan(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] < y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec lessThanEqual(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] <= y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec greaterThan(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] > y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec greaterThanEqual(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] >= y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] == y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] != y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool any(vec const& v) + { + bool Result = false; + for(length_t i = 0; i < L; ++i) + Result = Result || v[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool all(vec const& v) + { + bool Result = true; + for(length_t i = 0; i < L; ++i) + Result = Result && v[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec not_(vec const& v) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = !v[i]; + 
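// NOTE: illustrative sketch, not part of the vendored GLM sources.
// The relational functions in func_vector_relational.inl return a bool
// vector, which any()/all() then reduce. A hedged usage sketch:
//
//   #include <glm/glm.hpp>
//   #include <cassert>
//
//   int main() {
//       glm::vec3 a(1.0f, 2.0f, 3.0f), b(1.0f, 5.0f, 0.0f);
//       glm::bvec3 lt = glm::lessThan(a, b);   // (false, true, false)
//       assert(glm::any(lt) && !glm::all(lt));
//       assert(glm::all(glm::equal(a, a)));
//   }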
return Result; + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_vector_relational_simd.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/func_vector_relational_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/func_vector_relational_simd.inl new file mode 100644 index 000000000000..fd0fe8b7d9b4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/func_vector_relational_simd.inl @@ -0,0 +1,6 @@ +namespace glm{ +namespace detail +{ + +}//namespace detail +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/glm.cpp b/thirdparty/manifold/thirdparty/glm/glm/detail/glm.cpp new file mode 100644 index 000000000000..e0755bd65d46 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/glm.cpp @@ -0,0 +1,263 @@ +/// @ref core +/// @file glm/glm.cpp + +#ifndef GLM_ENABLE_EXPERIMENTAL +#define GLM_ENABLE_EXPERIMENTAL +#endif +#include +#include +#include +#include +#include +#include + +namespace glm +{ +// tvec1 type explicit instantiation +template struct vec<1, uint8, lowp>; +template struct vec<1, uint16, lowp>; +template struct vec<1, uint32, lowp>; +template struct vec<1, uint64, lowp>; +template struct vec<1, int8, lowp>; +template struct vec<1, int16, lowp>; +template struct vec<1, int32, lowp>; +template struct vec<1, int64, lowp>; +template struct vec<1, float32, lowp>; +template struct vec<1, float64, lowp>; + +template struct vec<1, uint8, mediump>; +template struct vec<1, uint16, mediump>; +template struct vec<1, uint32, mediump>; +template struct vec<1, uint64, mediump>; +template struct vec<1, int8, mediump>; +template struct vec<1, int16, mediump>; +template struct vec<1, int32, mediump>; +template struct vec<1, int64, mediump>; +template struct vec<1, float32, mediump>; +template struct vec<1, float64, mediump>; + +template struct vec<1, uint8, highp>; +template struct vec<1, uint16, highp>; +template struct vec<1, uint32, highp>; +template struct vec<1, uint64, highp>; +template struct vec<1, int8, highp>; +template struct vec<1, int16, highp>; +template struct vec<1, int32, highp>; +template struct vec<1, int64, highp>; +template struct vec<1, float32, highp>; +template struct vec<1, float64, highp>; + +// tvec2 type explicit instantiation +template struct vec<2, uint8, lowp>; +template struct vec<2, uint16, lowp>; +template struct vec<2, uint32, lowp>; +template struct vec<2, uint64, lowp>; +template struct vec<2, int8, lowp>; +template struct vec<2, int16, lowp>; +template struct vec<2, int32, lowp>; +template struct vec<2, int64, lowp>; +template struct vec<2, float32, lowp>; +template struct vec<2, float64, lowp>; + +template struct vec<2, uint8, mediump>; +template struct vec<2, uint16, mediump>; +template struct vec<2, uint32, mediump>; +template struct vec<2, uint64, mediump>; +template struct vec<2, int8, mediump>; +template struct vec<2, int16, mediump>; +template struct vec<2, int32, mediump>; +template struct vec<2, int64, mediump>; +template struct vec<2, float32, mediump>; +template struct vec<2, float64, mediump>; + +template struct vec<2, uint8, highp>; +template struct vec<2, uint16, highp>; +template struct vec<2, uint32, highp>; +template struct vec<2, uint64, highp>; +template struct vec<2, int8, highp>; +template struct vec<2, int16, highp>; +template struct vec<2, int32, highp>; +template struct vec<2, int64, highp>; +template struct vec<2, float32, highp>; +template struct vec<2, float64, highp>; + +// tvec3 type explicit instantiation +template struct vec<3, 
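// NOTE: illustrative sketch, not part of the vendored GLM sources.
// glm.cpp force-instantiates every vec/mat/quat combination so one
// translation unit carries their definitions. The same mechanism in
// miniature, for a hypothetical user type:
//
//   // header: template declared, instantiation suppressed in users
//   template<typename T> struct Box { T value; };
//   extern template struct Box<float>;
//
//   // exactly one .cpp: the explicit instantiation definition
//   template struct Box<float>;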
uint8, lowp>; +template struct vec<3, uint16, lowp>; +template struct vec<3, uint32, lowp>; +template struct vec<3, uint64, lowp>; +template struct vec<3, int8, lowp>; +template struct vec<3, int16, lowp>; +template struct vec<3, int32, lowp>; +template struct vec<3, int64, lowp>; +template struct vec<3, float32, lowp>; +template struct vec<3, float64, lowp>; + +template struct vec<3, uint8, mediump>; +template struct vec<3, uint16, mediump>; +template struct vec<3, uint32, mediump>; +template struct vec<3, uint64, mediump>; +template struct vec<3, int8, mediump>; +template struct vec<3, int16, mediump>; +template struct vec<3, int32, mediump>; +template struct vec<3, int64, mediump>; +template struct vec<3, float32, mediump>; +template struct vec<3, float64, mediump>; + +template struct vec<3, uint8, highp>; +template struct vec<3, uint16, highp>; +template struct vec<3, uint32, highp>; +template struct vec<3, uint64, highp>; +template struct vec<3, int8, highp>; +template struct vec<3, int16, highp>; +template struct vec<3, int32, highp>; +template struct vec<3, int64, highp>; +template struct vec<3, float32, highp>; +template struct vec<3, float64, highp>; + +// tvec4 type explicit instantiation +template struct vec<4, uint8, lowp>; +template struct vec<4, uint16, lowp>; +template struct vec<4, uint32, lowp>; +template struct vec<4, uint64, lowp>; +template struct vec<4, int8, lowp>; +template struct vec<4, int16, lowp>; +template struct vec<4, int32, lowp>; +template struct vec<4, int64, lowp>; +template struct vec<4, float32, lowp>; +template struct vec<4, float64, lowp>; + +template struct vec<4, uint8, mediump>; +template struct vec<4, uint16, mediump>; +template struct vec<4, uint32, mediump>; +template struct vec<4, uint64, mediump>; +template struct vec<4, int8, mediump>; +template struct vec<4, int16, mediump>; +template struct vec<4, int32, mediump>; +template struct vec<4, int64, mediump>; +template struct vec<4, float32, mediump>; +template struct vec<4, float64, mediump>; + +template struct vec<4, uint8, highp>; +template struct vec<4, uint16, highp>; +template struct vec<4, uint32, highp>; +template struct vec<4, uint64, highp>; +template struct vec<4, int8, highp>; +template struct vec<4, int16, highp>; +template struct vec<4, int32, highp>; +template struct vec<4, int64, highp>; +template struct vec<4, float32, highp>; +template struct vec<4, float64, highp>; + +// tmat2x2 type explicit instantiation +template struct mat<2, 2, float32, lowp>; +template struct mat<2, 2, float64, lowp>; + +template struct mat<2, 2, float32, mediump>; +template struct mat<2, 2, float64, mediump>; + +template struct mat<2, 2, float32, highp>; +template struct mat<2, 2, float64, highp>; + +// tmat2x3 type explicit instantiation +template struct mat<2, 3, float32, lowp>; +template struct mat<2, 3, float64, lowp>; + +template struct mat<2, 3, float32, mediump>; +template struct mat<2, 3, float64, mediump>; + +template struct mat<2, 3, float32, highp>; +template struct mat<2, 3, float64, highp>; + +// tmat2x4 type explicit instantiation +template struct mat<2, 4, float32, lowp>; +template struct mat<2, 4, float64, lowp>; + +template struct mat<2, 4, float32, mediump>; +template struct mat<2, 4, float64, mediump>; + +template struct mat<2, 4, float32, highp>; +template struct mat<2, 4, float64, highp>; + +// tmat3x2 type explicit instantiation +template struct mat<3, 2, float32, lowp>; +template struct mat<3, 2, float64, lowp>; + +template struct mat<3, 2, float32, mediump>; +template struct 
mat<3, 2, float64, mediump>;
+
+template struct mat<3, 2, float32, highp>;
+template struct mat<3, 2, float64, highp>;
+
+// tmat3x3 type explicit instantiation
+template struct mat<3, 3, float32, lowp>;
+template struct mat<3, 3, float64, lowp>;
+
+template struct mat<3, 3, float32, mediump>;
+template struct mat<3, 3, float64, mediump>;
+
+template struct mat<3, 3, float32, highp>;
+template struct mat<3, 3, float64, highp>;
+
+// tmat3x4 type explicit instantiation
+template struct mat<3, 4, float32, lowp>;
+template struct mat<3, 4, float64, lowp>;
+
+template struct mat<3, 4, float32, mediump>;
+template struct mat<3, 4, float64, mediump>;
+
+template struct mat<3, 4, float32, highp>;
+template struct mat<3, 4, float64, highp>;
+
+// tmat4x2 type explicit instantiation
+template struct mat<4, 2, float32, lowp>;
+template struct mat<4, 2, float64, lowp>;
+
+template struct mat<4, 2, float32, mediump>;
+template struct mat<4, 2, float64, mediump>;
+
+template struct mat<4, 2, float32, highp>;
+template struct mat<4, 2, float64, highp>;
+
+// tmat4x3 type explicit instantiation
+template struct mat<4, 3, float32, lowp>;
+template struct mat<4, 3, float64, lowp>;
+
+template struct mat<4, 3, float32, mediump>;
+template struct mat<4, 3, float64, mediump>;
+
+template struct mat<4, 3, float32, highp>;
+template struct mat<4, 3, float64, highp>;
+
+// tmat4x4 type explicit instantiation
+template struct mat<4, 4, float32, lowp>;
+template struct mat<4, 4, float64, lowp>;
+
+template struct mat<4, 4, float32, mediump>;
+template struct mat<4, 4, float64, mediump>;
+
+template struct mat<4, 4, float32, highp>;
+template struct mat<4, 4, float64, highp>;
+
+// tquat type explicit instantiation
+template struct qua<float32, lowp>;
+template struct qua<float64, lowp>;
+
+template struct qua<float32, mediump>;
+template struct qua<float64, mediump>;
+
+template struct qua<float32, highp>;
+template struct qua<float64, highp>;
+
+//tdualquat type explicit instantiation
+template struct tdualquat<float32, lowp>;
+template struct tdualquat<float64, lowp>;
+
+template struct tdualquat<float32, mediump>;
+template struct tdualquat<float64, mediump>;
+
+template struct tdualquat<float32, highp>;
+template struct tdualquat<float64, highp>;
+
+}//namespace glm
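The glm.cpp file above consists entirely of explicit template instantiation definitions: each `template struct vec<...>;` statement forces the compiler to emit every member of that specialization into this one translation unit, which is what allows builds defining GLM_EXTERNAL_TEMPLATE to declare the same specializations extern elsewhere and skip re-instantiating them. A minimal standalone sketch of the idiom, with a toy vec type (illustrative only, not part of the patch):

// explicit_instantiation_demo.cpp -- standalone sketch of the idiom glm.cpp uses.
#include <cstdio>

template<int L, typename T>
struct vec {
    T data[L];
    T sum() const {
        // All members of an explicitly instantiated specialization are compiled
        // here even if no caller in this translation unit uses them.
        T s = T(0);
        for (int i = 0; i < L; ++i) s += data[i];
        return s;
    }
};

// Explicit instantiation definitions: the same syntax as the glm.cpp lines above.
template struct vec<2, float>;
template struct vec<3, double>;

int main() {
    vec<3, double> v{{1.0, 2.0, 3.0}};
    std::printf("%f\n", v.sum()); // prints 6.000000
    return 0;
}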
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/qualifier.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/qualifier.hpp
new file mode 100644
index 000000000000..a6c96cca5c2e
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/qualifier.hpp
@@ -0,0 +1,229 @@
+#pragma once
+
+#include "setup.hpp"
+
+namespace glm
+{
+	/// Qualify GLM types in term of alignment (packed, aligned) and precision in term of ULPs (lowp, mediump, highp)
+	enum qualifier
+	{
+		packed_highp, ///< Typed data is tightly packed in memory and operations are executed with high precision in term of ULPs
+		packed_mediump, ///< Typed data is tightly packed in memory and operations are executed with medium precision in term of ULPs for higher performance
+		packed_lowp, ///< Typed data is tightly packed in memory and operations are executed with low precision in term of ULPs to maximize performance
+
+#		if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+		aligned_highp, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs
+		aligned_mediump, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs for higher performance
+		aligned_lowp, // ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs to maximize performance
+		aligned = aligned_highp, ///< By default aligned qualifier is also high precision
+#		endif
+
+		highp = packed_highp, ///< By default highp qualifier is also packed
+		mediump = packed_mediump, ///< By default mediump qualifier is also packed
+		lowp = packed_lowp, ///< By default lowp qualifier is also packed
+		packed = packed_highp, ///< By default packed qualifier is also high precision
+
+#		if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE && defined(GLM_FORCE_DEFAULT_ALIGNED_GENTYPES)
+		defaultp = aligned_highp
+#		else
+		defaultp = highp
+#		endif
+	};
+
+	typedef qualifier precision;
+
+	template<length_t L, typename T, qualifier Q = defaultp> struct vec;
+	template<length_t C, length_t R, typename T, qualifier Q = defaultp> struct mat;
+	template<typename T, qualifier Q = defaultp> struct qua;
+
+#	if GLM_HAS_TEMPLATE_ALIASES
+	template <typename T, qualifier Q = defaultp> using tvec1 = vec<1, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tvec2 = vec<2, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tvec3 = vec<3, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tvec4 = vec<4, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tmat2x2 = mat<2, 2, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tmat2x3 = mat<2, 3, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tmat2x4 = mat<2, 4, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tmat3x2 = mat<3, 2, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tmat3x3 = mat<3, 3, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tmat3x4 = mat<3, 4, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tmat4x2 = mat<4, 2, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tmat4x3 = mat<4, 3, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tmat4x4 = mat<4, 4, T, Q>;
+	template <typename T, qualifier Q = defaultp> using tquat = qua<T, Q>;
+#	endif
+
+namespace detail
+{
+	template<qualifier P>
+	struct is_aligned
+	{
+		static const bool value = false;
+	};
+
+#	if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+	template<>
+	struct is_aligned<glm::aligned_lowp>
+	{
+		static const bool value = true;
+	};
+
+	template<>
+	struct is_aligned<glm::aligned_mediump>
+	{
+		static const bool value = true;
+	};
+
+	template<>
+	struct is_aligned<glm::aligned_highp>
+	{
+		static const bool value = true;
+	};
+#	endif
+
+	template<length_t L, typename T, bool is_aligned>
+	struct storage
+	{
+		typedef struct type {
+			T data[L];
+		} type;
+	};
+
+#	if GLM_HAS_ALIGNOF
+	template<length_t L, typename T>
+	struct storage<L, T, true>
+	{
+		typedef struct alignas(L * sizeof(T)) type {
+			T data[L];
+		} type;
+	};
+
+	template<typename T>
+	struct storage<3, T, true>
+	{
+		typedef struct alignas(4 * sizeof(T)) type {
+			T data[4];
+		} type;
+	};
+#	endif
+
+#	if GLM_ARCH & GLM_ARCH_SSE2_BIT
+	template<>
+	struct storage<4, float, true>
+	{
+		typedef glm_f32vec4 type;
+	};
+
+	template<>
+	struct storage<4, int, true>
+	{
+		typedef glm_i32vec4 type;
+	};
+
+	template<>
+	struct storage<4, unsigned int, true>
+	{
+		typedef glm_u32vec4 type;
+	};
+
+	template<>
+	struct storage<2, double, true>
+	{
+		typedef glm_f64vec2 type;
+	};
+
+	template<>
+	struct storage<2, detail::int64, true>
+	{
+		typedef glm_i64vec2 type;
+	};
+
+	template<>
+	struct storage<2, detail::uint64, true>
+	{
+		typedef glm_u64vec2 type;
+	};
+#	endif
+#	if (GLM_ARCH & GLM_ARCH_AVX_BIT)
+	template<>
+	struct storage<4, double, true>
+	{
+		typedef glm_f64vec4 type;
+	};
+#	endif
+
+#	if (GLM_ARCH & GLM_ARCH_AVX2_BIT)
+	template<>
+	struct storage<4, detail::int64, true>
+	{
+		typedef glm_i64vec4 type;
+	};
+
+	template<>
+	struct storage<4, detail::uint64, true>
+	{
+		typedef glm_u64vec4 type;
+	};
+#	endif
+
+#	if GLM_ARCH & GLM_ARCH_NEON_BIT
+	template<>
+	struct storage<4, float, true>
+	{
+		typedef glm_f32vec4 type;
+	};
+
+	template<>
+	struct storage<4, int, true>
+	{
+		typedef glm_i32vec4 type;
+	};
+
+	template<>
+	struct storage<4, unsigned int, true>
+	{
+		typedef glm_u32vec4 type;
+	};
+#	endif
+
+	enum genTypeEnum
+	{
+		GENTYPE_VEC,
+		GENTYPE_MAT,
+		GENTYPE_QUAT
+	};
+
+	template <typename genType>
+	struct genTypeTrait
+	{};
+
+	template <length_t C, length_t R, typename T>
+	struct genTypeTrait<mat<C, R, T> >
+	{
+		static const genTypeEnum GENTYPE = GENTYPE_MAT;
+	};
+
+	template<typename genType, genTypeEnum type>
+	struct init_gentype
+	{
+	};
+
+	template<typename genType>
+	struct init_gentype<genType, GENTYPE_QUAT>
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genType identity()
+		{
+			return genType(1, 0, 0, 0);
+		}
+	};
+
+	template<typename genType>
+	struct init_gentype<genType, GENTYPE_MAT>
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genType identity()
+		{
+			return genType(1);
+		}
+	};
+}//namespace detail
+}//namespace glm
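The `detail::storage` machinery above is how a qualifier becomes a concrete data layout: a boolean (computed from `detail::is_aligned<Q>::value` in the real headers) selects either a tightly packed array or an over-aligned struct, and 3-component vectors are padded out to 4 lanes so SIMD loads stay in bounds. A standalone sketch of that selection mechanism with the trait argument passed in by hand (illustrative only, not part of the patch):

// storage_alignment_demo.cpp -- standalone sketch of the storage<> selection above.
#include <cstdio>

template<int L, typename T, bool Aligned>
struct storage {
    typedef struct type { T data[L]; } type; // packed: sizeof == L * sizeof(T)
};

template<int L, typename T>
struct storage<L, T, true> {
    // Over-aligned so the whole vector can be loaded with one aligned SIMD move.
    typedef struct alignas(L * sizeof(T)) type { T data[L]; } type;
};

template<typename T>
struct storage<3, T, true> {
    // vec3 is padded to 4 lanes, mirroring the header above.
    typedef struct alignas(4 * sizeof(T)) type { T data[4]; } type;
};

int main() {
    std::printf("packed vec3<float>:  size=%zu align=%zu\n",
                sizeof(storage<3, float, false>::type),
                alignof(storage<3, float, false>::type)); // 12 / 4
    std::printf("aligned vec3<float>: size=%zu align=%zu\n",
                sizeof(storage<3, float, true>::type),
                alignof(storage<3, float, true>::type));  // 16 / 16
    return 0;
}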
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/setup.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/setup.hpp
new file mode 100644
index 000000000000..19953bcb4e57
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/setup.hpp
@@ -0,0 +1,1167 @@
+#ifndef GLM_SETUP_INCLUDED
+
+#include <cassert>
+#include <cstddef>
+
+#define GLM_VERSION_MAJOR 0
+#define GLM_VERSION_MINOR 9
+#define GLM_VERSION_PATCH 9
+#define GLM_VERSION_REVISION 9
+#define GLM_VERSION 999
+#define GLM_VERSION_MESSAGE "GLM: version 0.9.9.9"
+
+#define GLM_SETUP_INCLUDED GLM_VERSION
+
+///////////////////////////////////////////////////////////////////////////////////
+// Active states
+
+#define GLM_DISABLE 0
+#define GLM_ENABLE 1
+
+///////////////////////////////////////////////////////////////////////////////////
+// Messages
+
+#if defined(GLM_FORCE_MESSAGES)
+#	define GLM_MESSAGES GLM_ENABLE
+#else
+#	define GLM_MESSAGES GLM_DISABLE
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// Detect the platform
+
+#include "../simd/platform.h"
+
+///////////////////////////////////////////////////////////////////////////////////
+// Build model
+
+#if defined(_M_ARM64) || defined(__LP64__) || defined(_M_X64) || defined(__ppc64__) || defined(__x86_64__)
+#	define GLM_MODEL GLM_MODEL_64
+#elif defined(__i386__) || defined(__ppc__) || defined(__ILP32__) || defined(_M_ARM)
+#	define GLM_MODEL GLM_MODEL_32
+#else
+#	define GLM_MODEL GLM_MODEL_32
+#endif//
+
+#if !defined(GLM_MODEL) && GLM_COMPILER != 0
+#	error "GLM_MODEL undefined, your compiler may not be supported by GLM. Add #define GLM_MODEL 0 to ignore this message."
+#endif//GLM_MODEL + +/////////////////////////////////////////////////////////////////////////////////// +// C++ Version + +// User defines: GLM_FORCE_CXX98, GLM_FORCE_CXX03, GLM_FORCE_CXX11, GLM_FORCE_CXX14, GLM_FORCE_CXX17, GLM_FORCE_CXX2A + +#define GLM_LANG_CXX98_FLAG (1 << 1) +#define GLM_LANG_CXX03_FLAG (1 << 2) +#define GLM_LANG_CXX0X_FLAG (1 << 3) +#define GLM_LANG_CXX11_FLAG (1 << 4) +#define GLM_LANG_CXX14_FLAG (1 << 5) +#define GLM_LANG_CXX17_FLAG (1 << 6) +#define GLM_LANG_CXX20_FLAG (1 << 7) +#define GLM_LANG_CXXMS_FLAG (1 << 8) +#define GLM_LANG_CXXGNU_FLAG (1 << 9) + +#define GLM_LANG_CXX98 GLM_LANG_CXX98_FLAG +#define GLM_LANG_CXX03 (GLM_LANG_CXX98 | GLM_LANG_CXX03_FLAG) +#define GLM_LANG_CXX0X (GLM_LANG_CXX03 | GLM_LANG_CXX0X_FLAG) +#define GLM_LANG_CXX11 (GLM_LANG_CXX0X | GLM_LANG_CXX11_FLAG) +#define GLM_LANG_CXX14 (GLM_LANG_CXX11 | GLM_LANG_CXX14_FLAG) +#define GLM_LANG_CXX17 (GLM_LANG_CXX14 | GLM_LANG_CXX17_FLAG) +#define GLM_LANG_CXX20 (GLM_LANG_CXX17 | GLM_LANG_CXX20_FLAG) +#define GLM_LANG_CXXMS GLM_LANG_CXXMS_FLAG +#define GLM_LANG_CXXGNU GLM_LANG_CXXGNU_FLAG + +#if (defined(_MSC_EXTENSIONS)) +# define GLM_LANG_EXT GLM_LANG_CXXMS_FLAG +#elif ((GLM_COMPILER & (GLM_COMPILER_CLANG | GLM_COMPILER_GCC)) && (GLM_ARCH & GLM_ARCH_SIMD_BIT)) +# define GLM_LANG_EXT GLM_LANG_CXXMS_FLAG +#else +# define GLM_LANG_EXT 0 +#endif + +#if (defined(GLM_FORCE_CXX_UNKNOWN)) +# define GLM_LANG 0 +#elif defined(GLM_FORCE_CXX20) +# define GLM_LANG (GLM_LANG_CXX20 | GLM_LANG_EXT) +# define GLM_LANG_STL11_FORCED +#elif defined(GLM_FORCE_CXX17) +# define GLM_LANG (GLM_LANG_CXX17 | GLM_LANG_EXT) +# define GLM_LANG_STL11_FORCED +#elif defined(GLM_FORCE_CXX14) +# define GLM_LANG (GLM_LANG_CXX14 | GLM_LANG_EXT) +# define GLM_LANG_STL11_FORCED +#elif defined(GLM_FORCE_CXX11) +# define GLM_LANG (GLM_LANG_CXX11 | GLM_LANG_EXT) +# define GLM_LANG_STL11_FORCED +#elif defined(GLM_FORCE_CXX03) +# define GLM_LANG (GLM_LANG_CXX03 | GLM_LANG_EXT) +#elif defined(GLM_FORCE_CXX98) +# define GLM_LANG (GLM_LANG_CXX98 | GLM_LANG_EXT) +#else +# if GLM_COMPILER & GLM_COMPILER_VC && defined(_MSVC_LANG) +# if GLM_COMPILER >= GLM_COMPILER_VC15_7 +# define GLM_LANG_PLATFORM _MSVC_LANG +# elif GLM_COMPILER >= GLM_COMPILER_VC15 +# if _MSVC_LANG > 201402L +# define GLM_LANG_PLATFORM 201402L +# else +# define GLM_LANG_PLATFORM _MSVC_LANG +# endif +# else +# define GLM_LANG_PLATFORM 0 +# endif +# else +# define GLM_LANG_PLATFORM 0 +# endif + +# if __cplusplus > 201703L || GLM_LANG_PLATFORM > 201703L +# define GLM_LANG (GLM_LANG_CXX20 | GLM_LANG_EXT) +# elif __cplusplus == 201703L || GLM_LANG_PLATFORM == 201703L +# define GLM_LANG (GLM_LANG_CXX17 | GLM_LANG_EXT) +# elif __cplusplus == 201402L || __cplusplus == 201406L || __cplusplus == 201500L || GLM_LANG_PLATFORM == 201402L +# define GLM_LANG (GLM_LANG_CXX14 | GLM_LANG_EXT) +# elif __cplusplus == 201103L || GLM_LANG_PLATFORM == 201103L +# define GLM_LANG (GLM_LANG_CXX11 | GLM_LANG_EXT) +# elif defined(__INTEL_CXX11_MODE__) || defined(_MSC_VER) || defined(__GXX_EXPERIMENTAL_CXX0X__) +# define GLM_LANG (GLM_LANG_CXX0X | GLM_LANG_EXT) +# elif __cplusplus == 199711L +# define GLM_LANG (GLM_LANG_CXX98 | GLM_LANG_EXT) +# else +# define GLM_LANG (0 | GLM_LANG_EXT) +# endif +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Has of C++ features + +// http://clang.llvm.org/cxx_status.html +// http://gcc.gnu.org/projects/cxx0x.html +// http://msdn.microsoft.com/en-us/library/vstudio/hh567368(v=vs.120).aspx + +// Android has multiple 
STLs but C++11 STL detection doesn't always work #284 #564 +#if GLM_PLATFORM == GLM_PLATFORM_ANDROID && !defined(GLM_LANG_STL11_FORCED) +# define GLM_HAS_CXX11_STL 0 +#elif (GLM_COMPILER & GLM_COMPILER_CUDA_RTC) == GLM_COMPILER_CUDA_RTC +# define GLM_HAS_CXX11_STL 0 +#elif (GLM_COMPILER & GLM_COMPILER_HIP) +# define GLM_HAS_CXX11_STL 0 +#elif GLM_COMPILER & GLM_COMPILER_CLANG +# if (defined(_LIBCPP_VERSION) || (GLM_LANG & GLM_LANG_CXX11_FLAG) || defined(GLM_LANG_STL11_FORCED)) +# define GLM_HAS_CXX11_STL 1 +# else +# define GLM_HAS_CXX11_STL 0 +# endif +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_CXX11_STL 1 +#else +# define GLM_HAS_CXX11_STL ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC48)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ + ((GLM_PLATFORM != GLM_PLATFORM_WINDOWS) && (GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL15)))) +#endif + +// N1720 +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_STATIC_ASSERT __has_feature(cxx_static_assert) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_STATIC_ASSERT 1 +#else +# define GLM_HAS_STATIC_ASSERT ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N1988 +#if GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_EXTENDED_INTEGER_TYPE 1 +#else +# define GLM_HAS_EXTENDED_INTEGER_TYPE (\ + ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_VC)) || \ + ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_CLANG)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP))) +#endif + +// N2672 Initializer lists http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_INITIALIZER_LISTS __has_feature(cxx_generalized_initializers) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_INITIALIZER_LISTS 1 +#else +# define GLM_HAS_INITIALIZER_LISTS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)) || \ + ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL14)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2544 Unrestricted unions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_UNRESTRICTED_UNIONS __has_feature(cxx_unrestricted_unions) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_UNRESTRICTED_UNIONS 1 +#else +# define GLM_HAS_UNRESTRICTED_UNIONS (GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + (GLM_COMPILER & GLM_COMPILER_VC) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP))) +#endif + +// N2346 +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_DEFAULTED_FUNCTIONS __has_feature(cxx_defaulted_functions) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_DEFAULTED_FUNCTIONS 1 +#else +# define GLM_HAS_DEFAULTED_FUNCTIONS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ + ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ + (GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP))) +#endif + +// N2118 +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define 
GLM_HAS_RVALUE_REFERENCES __has_feature(cxx_rvalue_references) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_RVALUE_REFERENCES 1 +#else +# define GLM_HAS_RVALUE_REFERENCES ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_VC)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2437 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS __has_feature(cxx_explicit_conversions) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS 1 +#else +# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL14)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2258 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_TEMPLATE_ALIASES __has_feature(cxx_alias_templates) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_TEMPLATE_ALIASES 1 +#else +# define GLM_HAS_TEMPLATE_ALIASES ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2930 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2009/n2930.html +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_RANGE_FOR __has_feature(cxx_range_for) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_RANGE_FOR 1 +#else +# define GLM_HAS_RANGE_FOR ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2341 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_ALIGNOF __has_feature(cxx_alignas) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_ALIGNOF 1 +#else +# define GLM_HAS_ALIGNOF ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL15)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC14)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2235 Generalized Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf +// N3652 Extended Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3652.html +#if (GLM_ARCH & GLM_ARCH_SIMD_BIT) // Compiler SIMD intrinsics don't support constexpr... 
+# define GLM_HAS_CONSTEXPR 0 +#elif (GLM_COMPILER & GLM_COMPILER_CLANG) +# define GLM_HAS_CONSTEXPR __has_feature(cxx_relaxed_constexpr) +#elif (GLM_LANG & GLM_LANG_CXX14_FLAG) +# define GLM_HAS_CONSTEXPR 1 +#else +# define GLM_HAS_CONSTEXPR ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && GLM_HAS_INITIALIZER_LISTS && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL17)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)))) +#endif + +#if GLM_HAS_CONSTEXPR +# define GLM_CONSTEXPR constexpr +#else +# define GLM_CONSTEXPR +#endif + +// +#if GLM_HAS_CONSTEXPR +# if (GLM_COMPILER & GLM_COMPILER_CLANG) +# if __has_feature(cxx_if_constexpr) +# define GLM_HAS_IF_CONSTEXPR 1 +# else +# define GLM_HAS_IF_CONSTEXPR 0 +# endif +# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) +# define GLM_HAS_IF_CONSTEXPR 1 +# else +# define GLM_HAS_IF_CONSTEXPR 0 +# endif +#else +# define GLM_HAS_IF_CONSTEXPR 0 +#endif + +#if GLM_HAS_IF_CONSTEXPR +# define GLM_IF_CONSTEXPR if constexpr +#else +# define GLM_IF_CONSTEXPR if +#endif + +// +#if GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_ASSIGNABLE 1 +#else +# define GLM_HAS_ASSIGNABLE ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)) || \ + ((GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC49)))) +#endif + +// +#define GLM_HAS_TRIVIAL_QUERIES 0 + +// +#if GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_MAKE_SIGNED 1 +#else +# define GLM_HAS_MAKE_SIGNED ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// +#if defined(GLM_FORCE_INTRINSICS) +# define GLM_HAS_BITSCAN_WINDOWS ((GLM_PLATFORM & GLM_PLATFORM_WINDOWS) && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC14) && (GLM_ARCH & GLM_ARCH_X86_BIT)))) +#else +# define GLM_HAS_BITSCAN_WINDOWS 0 +#endif + +#if GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_NOEXCEPT 1 +#else +# define GLM_HAS_NOEXCEPT 0 +#endif + +#if GLM_HAS_NOEXCEPT +# define GLM_NOEXCEPT noexcept +#else +# define GLM_NOEXCEPT +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// OpenMP +#ifdef _OPENMP +# if GLM_COMPILER & GLM_COMPILER_GCC +# if GLM_COMPILER >= GLM_COMPILER_GCC61 +# define GLM_HAS_OPENMP 45 +# elif GLM_COMPILER >= GLM_COMPILER_GCC49 +# define GLM_HAS_OPENMP 40 +# elif GLM_COMPILER >= GLM_COMPILER_GCC47 +# define GLM_HAS_OPENMP 31 +# else +# define GLM_HAS_OPENMP 0 +# endif +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# if GLM_COMPILER >= GLM_COMPILER_CLANG38 +# define GLM_HAS_OPENMP 31 +# else +# define GLM_HAS_OPENMP 0 +# endif +# elif GLM_COMPILER & GLM_COMPILER_VC +# define GLM_HAS_OPENMP 20 +# elif GLM_COMPILER & GLM_COMPILER_INTEL +# if GLM_COMPILER >= GLM_COMPILER_INTEL16 +# define GLM_HAS_OPENMP 40 +# else +# define GLM_HAS_OPENMP 0 +# endif +# else +# define GLM_HAS_OPENMP 0 +# endif +#else +# define GLM_HAS_OPENMP 0 +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// nullptr + +#if GLM_LANG & GLM_LANG_CXX0X_FLAG +# define GLM_CONFIG_NULLPTR GLM_ENABLE +#else +# define GLM_CONFIG_NULLPTR GLM_DISABLE +#endif + +#if GLM_CONFIG_NULLPTR == GLM_ENABLE +# define GLM_NULLPTR nullptr +#else +# define GLM_NULLPTR 0 +#endif + 
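A detail worth noting in the language detection above: the GLM_LANG_CXX* values are cumulative bitmasks, each standard OR-ing in the flag of every standard before it, so "at least C++11" is a single bitwise AND against GLM_LANG rather than a chain of comparisons. A standalone sketch of the idiom with hypothetical LANG_* names (illustrative only, not part of the patch):

// lang_flags_demo.cpp -- standalone sketch of the cumulative-flag idiom above.
#include <cstdio>

#define LANG_CXX98_FLAG (1 << 1)
#define LANG_CXX03_FLAG (1 << 2)
#define LANG_CXX11_FLAG (1 << 4)
#define LANG_CXX14_FLAG (1 << 5)
#define LANG_CXX17_FLAG (1 << 6)

// Each standard includes all earlier flags, like GLM_LANG_CXX14 above.
#define LANG_CXX98 LANG_CXX98_FLAG
#define LANG_CXX03 (LANG_CXX98 | LANG_CXX03_FLAG)
#define LANG_CXX11 (LANG_CXX03 | LANG_CXX11_FLAG)
#define LANG_CXX14 (LANG_CXX11 | LANG_CXX14_FLAG)

int main() {
    int lang = LANG_CXX14; // pretend detection picked C++14
    if (lang & LANG_CXX11_FLAG)
        std::puts("C++11 features available");     // printed: CXX14 carries the CXX11 flag
    if (!(lang & LANG_CXX17_FLAG))
        std::puts("C++17 features not available"); // printed
    return 0;
}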
+/////////////////////////////////////////////////////////////////////////////////// +// Static assert + +#if GLM_HAS_STATIC_ASSERT +# define GLM_STATIC_ASSERT(x, message) static_assert(x, message) +#elif GLM_COMPILER & GLM_COMPILER_VC +# define GLM_STATIC_ASSERT(x, message) typedef char __CASSERT__##__LINE__[(x) ? 1 : -1] +#else +# define GLM_STATIC_ASSERT(x, message) assert(x) +#endif//GLM_LANG + +/////////////////////////////////////////////////////////////////////////////////// +// Qualifiers + +// User defines: GLM_CUDA_FORCE_DEVICE_FUNC, GLM_CUDA_FORCE_HOST_FUNC + +#if (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) +# if defined(GLM_CUDA_FORCE_DEVICE_FUNC) && defined(GLM_CUDA_FORCE_HOST_FUNC) +# error "GLM error: GLM_CUDA_FORCE_DEVICE_FUNC and GLM_CUDA_FORCE_HOST_FUNC should not be defined at the same time, GLM by default generates both device and host code for CUDA compiler." +# endif//defined(GLM_CUDA_FORCE_DEVICE_FUNC) && defined(GLM_CUDA_FORCE_HOST_FUNC) + +# if defined(GLM_CUDA_FORCE_DEVICE_FUNC) +# define GLM_CUDA_FUNC_DEF __device__ +# define GLM_CUDA_FUNC_DECL __device__ +# elif defined(GLM_CUDA_FORCE_HOST_FUNC) +# define GLM_CUDA_FUNC_DEF __host__ +# define GLM_CUDA_FUNC_DECL __host__ +# else +# define GLM_CUDA_FUNC_DEF __device__ __host__ +# define GLM_CUDA_FUNC_DECL __device__ __host__ +# endif//defined(GLM_CUDA_FORCE_XXXX_FUNC) +#else +# define GLM_CUDA_FUNC_DEF +# define GLM_CUDA_FUNC_DECL +#endif + +#if defined(GLM_FORCE_INLINE) +# if GLM_COMPILER & GLM_COMPILER_VC +# define GLM_INLINE __forceinline +# define GLM_NEVER_INLINE __declspec(noinline) +# elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG) +# define GLM_INLINE inline __attribute__((__always_inline__)) +# define GLM_NEVER_INLINE __attribute__((__noinline__)) +# elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) +# define GLM_INLINE __forceinline__ +# define GLM_NEVER_INLINE __noinline__ +# else +# define GLM_INLINE inline +# define GLM_NEVER_INLINE +# endif//GLM_COMPILER +#else +# define GLM_INLINE inline +# define GLM_NEVER_INLINE +#endif//defined(GLM_FORCE_INLINE) + +#define GLM_FUNC_DECL GLM_CUDA_FUNC_DECL +#define GLM_FUNC_QUALIFIER GLM_CUDA_FUNC_DEF GLM_INLINE + +// Do not use CUDA function qualifiers on CUDA compiler when functions are made default +#if GLM_HAS_DEFAULTED_FUNCTIONS +# define GLM_DEFAULTED_FUNC_DECL +# define GLM_DEFAULTED_FUNC_QUALIFIER GLM_INLINE +#else +# define GLM_DEFAULTED_FUNC_DECL GLM_FUNC_DECL +# define GLM_DEFAULTED_FUNC_QUALIFIER GLM_FUNC_QUALIFIER +#endif//GLM_HAS_DEFAULTED_FUNCTIONS +#if !defined(GLM_FORCE_CTOR_INIT) +# define GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_DEFAULTED_FUNC_DECL +# define GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_DEFAULTED_FUNC_QUALIFIER +#else +# define GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_FUNC_DECL +# define GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_FUNC_QUALIFIER +#endif//GLM_FORCE_CTOR_INIT + +/////////////////////////////////////////////////////////////////////////////////// +// Swizzle operators + +// User defines: GLM_FORCE_SWIZZLE + +#define GLM_SWIZZLE_DISABLED 0 +#define GLM_SWIZZLE_OPERATOR 1 +#define GLM_SWIZZLE_FUNCTION 2 + +#if defined(GLM_SWIZZLE) +# pragma message("GLM: GLM_SWIZZLE is deprecated, use GLM_FORCE_SWIZZLE instead.") +# define GLM_FORCE_SWIZZLE +#endif + +#if defined(GLM_FORCE_SWIZZLE) && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && !defined(GLM_FORCE_XYZW_ONLY) +# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_OPERATOR +#elif defined(GLM_FORCE_SWIZZLE) +# define GLM_CONFIG_SWIZZLE 
GLM_SWIZZLE_FUNCTION +#else +# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_DISABLED +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Allows using not basic types as genType + +// #define GLM_FORCE_UNRESTRICTED_GENTYPE + +#ifdef GLM_FORCE_UNRESTRICTED_GENTYPE +# define GLM_CONFIG_UNRESTRICTED_GENTYPE GLM_ENABLE +#else +# define GLM_CONFIG_UNRESTRICTED_GENTYPE GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Allows using any scaler as float + +// #define GLM_FORCE_UNRESTRICTED_FLOAT + +#ifdef GLM_FORCE_UNRESTRICTED_FLOAT +# define GLM_CONFIG_UNRESTRICTED_FLOAT GLM_ENABLE +#else +# define GLM_CONFIG_UNRESTRICTED_FLOAT GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Clip control, define GLM_FORCE_DEPTH_ZERO_TO_ONE before including GLM +// to use a clip space between 0 to 1. +// Coordinate system, define GLM_FORCE_LEFT_HANDED before including GLM +// to use left handed coordinate system by default. + +#define GLM_CLIP_CONTROL_ZO_BIT (1 << 0) // ZERO_TO_ONE +#define GLM_CLIP_CONTROL_NO_BIT (1 << 1) // NEGATIVE_ONE_TO_ONE +#define GLM_CLIP_CONTROL_LH_BIT (1 << 2) // LEFT_HANDED, For DirectX, Metal, Vulkan +#define GLM_CLIP_CONTROL_RH_BIT (1 << 3) // RIGHT_HANDED, For OpenGL, default in GLM + +#define GLM_CLIP_CONTROL_LH_ZO (GLM_CLIP_CONTROL_LH_BIT | GLM_CLIP_CONTROL_ZO_BIT) +#define GLM_CLIP_CONTROL_LH_NO (GLM_CLIP_CONTROL_LH_BIT | GLM_CLIP_CONTROL_NO_BIT) +#define GLM_CLIP_CONTROL_RH_ZO (GLM_CLIP_CONTROL_RH_BIT | GLM_CLIP_CONTROL_ZO_BIT) +#define GLM_CLIP_CONTROL_RH_NO (GLM_CLIP_CONTROL_RH_BIT | GLM_CLIP_CONTROL_NO_BIT) + +#ifdef GLM_FORCE_DEPTH_ZERO_TO_ONE +# ifdef GLM_FORCE_LEFT_HANDED +# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_LH_ZO +# else +# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_RH_ZO +# endif +#else +# ifdef GLM_FORCE_LEFT_HANDED +# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_LH_NO +# else +# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_RH_NO +# endif +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Qualifiers + +#if (GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)) +# define GLM_DEPRECATED __declspec(deprecated) +# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef __declspec(align(alignment)) type name +#elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG | GLM_COMPILER_INTEL) +# define GLM_DEPRECATED __attribute__((__deprecated__)) +# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __attribute__((aligned(alignment))) +#elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) +# define GLM_DEPRECATED +# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __align__(x) +#else +# define GLM_DEPRECATED +# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name +#endif + +/////////////////////////////////////////////////////////////////////////////////// + +#ifdef GLM_FORCE_EXPLICIT_CTOR +# define GLM_EXPLICIT explicit +#else +# define GLM_EXPLICIT +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Length type: all length functions returns a length_t type. +// When GLM_FORCE_SIZE_T_LENGTH is defined, length_t is a typedef of size_t otherwise +// length_t is a typedef of int like GLSL defines it. 
+
+#define GLM_LENGTH_INT 1
+#define GLM_LENGTH_SIZE_T 2
+
+#ifdef GLM_FORCE_SIZE_T_LENGTH
+#	define GLM_CONFIG_LENGTH_TYPE GLM_LENGTH_SIZE_T
+#else
+#	define GLM_CONFIG_LENGTH_TYPE GLM_LENGTH_INT
+#endif
+
+namespace glm
+{
+	using std::size_t;
+#	if GLM_CONFIG_LENGTH_TYPE == GLM_LENGTH_SIZE_T
+	typedef size_t length_t;
+#	else
+	typedef int length_t;
+#	endif
+}//namespace glm
+
+///////////////////////////////////////////////////////////////////////////////////
+// constexpr
+
+#if GLM_HAS_CONSTEXPR
+#	define GLM_CONFIG_CONSTEXP GLM_ENABLE
+
+	namespace glm
+	{
+		template<typename T, std::size_t N>
+		constexpr std::size_t countof(T const (&)[N])
+		{
+			return N;
+		}
+	}//namespace glm
+#	define GLM_COUNTOF(arr) glm::countof(arr)
+#elif defined(_MSC_VER)
+#	define GLM_CONFIG_CONSTEXP GLM_DISABLE
+
+#	define GLM_COUNTOF(arr) _countof(arr)
+#else
+#	define GLM_CONFIG_CONSTEXP GLM_DISABLE
+
+#	define GLM_COUNTOF(arr) sizeof(arr) / sizeof(arr[0])
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////
+// uint
+
+namespace glm{
+namespace detail
+{
+	template<typename T>
+	struct is_int
+	{
+		enum test {value = 0};
+	};
+
+	template<>
+	struct is_int<unsigned int>
+	{
+		enum test {value = ~0};
+	};
+
+	template<>
+	struct is_int<signed int>
+	{
+		enum test {value = ~0};
+	};
+}//namespace detail
+
+	typedef unsigned int uint;
+}//namespace glm
+
+///////////////////////////////////////////////////////////////////////////////////
+// 64-bit int
+
+#if GLM_HAS_EXTENDED_INTEGER_TYPE
+#	include <cstdint>
+#endif
+
+namespace glm{
+namespace detail
+{
+#	if GLM_HAS_EXTENDED_INTEGER_TYPE
+	typedef std::uint64_t uint64;
+	typedef std::int64_t int64;
+#	elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) // C99 detected, 64 bit types available
+	typedef uint64_t uint64;
+	typedef int64_t int64;
+#	elif GLM_COMPILER & GLM_COMPILER_VC
+	typedef unsigned __int64 uint64;
+	typedef signed __int64 int64;
+#	elif GLM_COMPILER & GLM_COMPILER_GCC
+#	pragma GCC diagnostic ignored "-Wlong-long"
+	__extension__ typedef unsigned long long uint64;
+	__extension__ typedef signed long long int64;
+#	elif (GLM_COMPILER & GLM_COMPILER_CLANG)
+#	pragma clang diagnostic ignored "-Wc++11-long-long"
+	typedef unsigned long long uint64;
+	typedef signed long long int64;
+#	else//unknown compiler
+	typedef unsigned long long uint64;
+	typedef signed long long int64;
+#	endif
+}//namespace detail
+}//namespace glm
+
+///////////////////////////////////////////////////////////////////////////////////
+// make_unsigned
+
+#if GLM_HAS_MAKE_SIGNED
+#	include <type_traits>
+
+namespace glm{
+namespace detail
+{
+	using std::make_unsigned;
+}//namespace detail
+}//namespace glm
+
+#else
+
+namespace glm{
+namespace detail
+{
+	template<typename genType>
+	struct make_unsigned
+	{};
+
+	template<>
+	struct make_unsigned<char>
+	{
+		typedef unsigned char type;
+	};
+
+	template<>
+	struct make_unsigned<signed char>
+	{
+		typedef unsigned char type;
+	};
+
+	template<>
+	struct make_unsigned<short>
+	{
+		typedef unsigned short type;
+	};
+
+	template<>
+	struct make_unsigned<int>
+	{
+		typedef unsigned int type;
+	};
+
+	template<>
+	struct make_unsigned<long>
+	{
+		typedef unsigned long type;
+	};
+
+	template<>
+	struct make_unsigned<int64>
+	{
+		typedef uint64 type;
+	};
+
+	template<>
+	struct make_unsigned<unsigned char>
+	{
+		typedef unsigned char type;
+	};
+
+	template<>
+	struct make_unsigned<unsigned short>
+	{
+		typedef unsigned short type;
+	};
+
+	template<>
+	struct make_unsigned<unsigned int>
+	{
+		typedef unsigned int type;
+	};
+
+	template<>
+	struct make_unsigned<unsigned long>
+	{
+		typedef unsigned long type;
+	};
+
+	template<>
+	struct make_unsigned<uint64>
+	{
+		typedef
uint64 type; + }; +}//namespace detail +}//namespace glm +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Only use x, y, z, w as vector type components + +#ifdef GLM_FORCE_XYZW_ONLY +# define GLM_CONFIG_XYZW_ONLY GLM_ENABLE +#else +# define GLM_CONFIG_XYZW_ONLY GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Configure the use of defaulted initialized types + +#define GLM_CTOR_INIT_DISABLE 0 +#define GLM_CTOR_INITIALIZER_LIST 1 +#define GLM_CTOR_INITIALISATION 2 + +#if defined(GLM_FORCE_CTOR_INIT) && GLM_HAS_INITIALIZER_LISTS +# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INITIALIZER_LIST +#elif defined(GLM_FORCE_CTOR_INIT) && !GLM_HAS_INITIALIZER_LISTS +# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INITIALISATION +#else +# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INIT_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Use SIMD instruction sets + +#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (GLM_ARCH & GLM_ARCH_SIMD_BIT) +# define GLM_CONFIG_SIMD GLM_ENABLE +#else +# define GLM_CONFIG_SIMD GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Configure the use of defaulted function + +#if GLM_HAS_DEFAULTED_FUNCTIONS +# define GLM_CONFIG_DEFAULTED_FUNCTIONS GLM_ENABLE +# define GLM_DEFAULT = default +#else +# define GLM_CONFIG_DEFAULTED_FUNCTIONS GLM_DISABLE +# define GLM_DEFAULT +#endif + +#if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INIT_DISABLE && GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE +# define GLM_CONFIG_DEFAULTED_DEFAULT_CTOR GLM_ENABLE +# define GLM_DEFAULT_CTOR GLM_DEFAULT +#else +# define GLM_CONFIG_DEFAULTED_DEFAULT_CTOR GLM_DISABLE +# define GLM_DEFAULT_CTOR +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Configure the use of aligned gentypes + +#ifdef GLM_FORCE_ALIGNED // Legacy define +# define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES +#endif + +#ifdef GLM_FORCE_DEFAULT_ALIGNED_GENTYPES +# define GLM_FORCE_ALIGNED_GENTYPES +#endif + +#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (defined(GLM_FORCE_ALIGNED_GENTYPES) || (GLM_CONFIG_SIMD == GLM_ENABLE)) +# define GLM_CONFIG_ALIGNED_GENTYPES GLM_ENABLE +#else +# define GLM_CONFIG_ALIGNED_GENTYPES GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Configure the use of anonymous structure as implementation detail + +#if ((GLM_CONFIG_SIMD == GLM_ENABLE) || (GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR) || (GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE)) +# define GLM_CONFIG_ANONYMOUS_STRUCT GLM_ENABLE +#else +# define GLM_CONFIG_ANONYMOUS_STRUCT GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Silent warnings + +#ifdef GLM_FORCE_SILENT_WARNINGS +# define GLM_SILENT_WARNINGS GLM_ENABLE +#else +# define GLM_SILENT_WARNINGS GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Precision + +#define GLM_HIGHP 1 +#define GLM_MEDIUMP 2 +#define GLM_LOWP 3 + +#if defined(GLM_FORCE_PRECISION_HIGHP_BOOL) || defined(GLM_PRECISION_HIGHP_BOOL) +# define GLM_CONFIG_PRECISION_BOOL GLM_HIGHP +#elif defined(GLM_FORCE_PRECISION_MEDIUMP_BOOL) || defined(GLM_PRECISION_MEDIUMP_BOOL) +# define GLM_CONFIG_PRECISION_BOOL GLM_MEDIUMP +#elif defined(GLM_FORCE_PRECISION_LOWP_BOOL) || defined(GLM_PRECISION_LOWP_BOOL) +# 
define GLM_CONFIG_PRECISION_BOOL GLM_LOWP +#else +# define GLM_CONFIG_PRECISION_BOOL GLM_HIGHP +#endif + +#if defined(GLM_FORCE_PRECISION_HIGHP_INT) || defined(GLM_PRECISION_HIGHP_INT) +# define GLM_CONFIG_PRECISION_INT GLM_HIGHP +#elif defined(GLM_FORCE_PRECISION_MEDIUMP_INT) || defined(GLM_PRECISION_MEDIUMP_INT) +# define GLM_CONFIG_PRECISION_INT GLM_MEDIUMP +#elif defined(GLM_FORCE_PRECISION_LOWP_INT) || defined(GLM_PRECISION_LOWP_INT) +# define GLM_CONFIG_PRECISION_INT GLM_LOWP +#else +# define GLM_CONFIG_PRECISION_INT GLM_HIGHP +#endif + +#if defined(GLM_FORCE_PRECISION_HIGHP_UINT) || defined(GLM_PRECISION_HIGHP_UINT) +# define GLM_CONFIG_PRECISION_UINT GLM_HIGHP +#elif defined(GLM_FORCE_PRECISION_MEDIUMP_UINT) || defined(GLM_PRECISION_MEDIUMP_UINT) +# define GLM_CONFIG_PRECISION_UINT GLM_MEDIUMP +#elif defined(GLM_FORCE_PRECISION_LOWP_UINT) || defined(GLM_PRECISION_LOWP_UINT) +# define GLM_CONFIG_PRECISION_UINT GLM_LOWP +#else +# define GLM_CONFIG_PRECISION_UINT GLM_HIGHP +#endif + +#if defined(GLM_FORCE_PRECISION_HIGHP_FLOAT) || defined(GLM_PRECISION_HIGHP_FLOAT) +# define GLM_CONFIG_PRECISION_FLOAT GLM_HIGHP +#elif defined(GLM_FORCE_PRECISION_MEDIUMP_FLOAT) || defined(GLM_PRECISION_MEDIUMP_FLOAT) +# define GLM_CONFIG_PRECISION_FLOAT GLM_MEDIUMP +#elif defined(GLM_FORCE_PRECISION_LOWP_FLOAT) || defined(GLM_PRECISION_LOWP_FLOAT) +# define GLM_CONFIG_PRECISION_FLOAT GLM_LOWP +#else +# define GLM_CONFIG_PRECISION_FLOAT GLM_HIGHP +#endif + +#if defined(GLM_FORCE_PRECISION_HIGHP_DOUBLE) || defined(GLM_PRECISION_HIGHP_DOUBLE) +# define GLM_CONFIG_PRECISION_DOUBLE GLM_HIGHP +#elif defined(GLM_FORCE_PRECISION_MEDIUMP_DOUBLE) || defined(GLM_PRECISION_MEDIUMP_DOUBLE) +# define GLM_CONFIG_PRECISION_DOUBLE GLM_MEDIUMP +#elif defined(GLM_FORCE_PRECISION_LOWP_DOUBLE) || defined(GLM_PRECISION_LOWP_DOUBLE) +# define GLM_CONFIG_PRECISION_DOUBLE GLM_LOWP +#else +# define GLM_CONFIG_PRECISION_DOUBLE GLM_HIGHP +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Check inclusions of different versions of GLM + +#elif ((GLM_SETUP_INCLUDED != GLM_VERSION) && !defined(GLM_FORCE_IGNORE_VERSION)) +# error "GLM error: A different version of GLM is already included. Define GLM_FORCE_IGNORE_VERSION before including GLM headers to ignore this error." 
+#elif GLM_SETUP_INCLUDED == GLM_VERSION + +/////////////////////////////////////////////////////////////////////////////////// +// Messages + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_DISPLAYED) +# define GLM_MESSAGE_DISPLAYED +# define GLM_STR_HELPER(x) #x +# define GLM_STR(x) GLM_STR_HELPER(x) + + // Report GLM version +# pragma message (GLM_STR(GLM_VERSION_MESSAGE)) + + // Report C++ language +# if (GLM_LANG & GLM_LANG_CXX20_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 20 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX20_FLAG) +# pragma message("GLM: C++ 2A") +# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 17 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) +# pragma message("GLM: C++ 17") +# elif (GLM_LANG & GLM_LANG_CXX14_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 14 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX14_FLAG) +# pragma message("GLM: C++ 14") +# elif (GLM_LANG & GLM_LANG_CXX11_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 11 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX11_FLAG) +# pragma message("GLM: C++ 11") +# elif (GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 0x with extensions") +# elif (GLM_LANG & GLM_LANG_CXX0X_FLAG) +# pragma message("GLM: C++ 0x") +# elif (GLM_LANG & GLM_LANG_CXX03_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 03 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX03_FLAG) +# pragma message("GLM: C++ 03") +# elif (GLM_LANG & GLM_LANG_CXX98_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 98 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX98_FLAG) +# pragma message("GLM: C++ 98") +# else +# pragma message("GLM: C++ language undetected") +# endif//GLM_LANG + + // Report compiler detection +# if GLM_COMPILER & GLM_COMPILER_CUDA +# pragma message("GLM: CUDA compiler detected") +# elif GLM_COMPILER & GLM_COMPILER_HIP +# pragma message("GLM: HIP compiler detected") +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma message("GLM: Visual C++ compiler detected") +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma message("GLM: Clang compiler detected") +# elif GLM_COMPILER & GLM_COMPILER_INTEL +# pragma message("GLM: Intel Compiler detected") +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma message("GLM: GCC compiler detected") +# else +# pragma message("GLM: Compiler not detected") +# endif + + // Report build target +# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with AVX2 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_AVX2_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with AVX2 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_AVX_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with AVX instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_AVX_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with AVX instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_SSE42_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with SSE4.2 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_SSE42_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with SSE4.2 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_SSE41_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with SSE4.1 instruction set build target") +# elif 
(GLM_ARCH & GLM_ARCH_SSE41_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with SSE4.1 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_SSSE3_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with SSSE3 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_SSSE3_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with SSSE3 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_SSE3_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with SSE3 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_SSE3_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with SSE3 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_SSE2_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with SSE2 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_SSE2_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with SSE2 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_X86_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits build target") +# elif (GLM_ARCH & GLM_ARCH_X86_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits build target") + +# elif (GLM_ARCH & GLM_ARCH_NEON_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: ARM 64 bits with Neon instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_NEON_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: ARM 32 bits with Neon instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_ARM_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: ARM 64 bits build target") +# elif (GLM_ARCH & GLM_ARCH_ARM_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: ARM 32 bits build target") + +# elif (GLM_ARCH & GLM_ARCH_MIPS_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: MIPS 64 bits build target") +# elif (GLM_ARCH & GLM_ARCH_MIPS_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: MIPS 32 bits build target") + +# elif (GLM_ARCH & GLM_ARCH_PPC_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: PowerPC 64 bits build target") +# elif (GLM_ARCH & GLM_ARCH_PPC_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: PowerPC 32 bits build target") +# else +# pragma message("GLM: Unknown build target") +# endif//GLM_ARCH + + // Report platform name +# if(GLM_PLATFORM & GLM_PLATFORM_QNXNTO) +# pragma message("GLM: QNX platform detected") +//# elif(GLM_PLATFORM & GLM_PLATFORM_IOS) +//# pragma message("GLM: iOS platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_APPLE) +# pragma message("GLM: Apple platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_WINCE) +# pragma message("GLM: WinCE platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_WINDOWS) +# pragma message("GLM: Windows platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_CHROME_NACL) +# pragma message("GLM: Native Client detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) +# pragma message("GLM: Android platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_LINUX) +# pragma message("GLM: Linux platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_UNIX) +# pragma message("GLM: UNIX platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_UNKNOWN) +# pragma message("GLM: platform unknown") +# else +# pragma message("GLM: platform not detected") +# endif + + // Report whether only xyzw component are used +# if defined GLM_FORCE_XYZW_ONLY +# pragma message("GLM: GLM_FORCE_XYZW_ONLY is defined. 
Only x, y, z and w component are available in vector type. This define disables swizzle operators and SIMD instruction sets.") +# endif + + // Report swizzle operator support +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +# pragma message("GLM: GLM_FORCE_SWIZZLE is defined, swizzling operators enabled.") +# elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION +# pragma message("GLM: GLM_FORCE_SWIZZLE is defined, swizzling functions enabled. Enable compiler C++ language extensions to enable swizzle operators.") +# else +# pragma message("GLM: GLM_FORCE_SWIZZLE is undefined. swizzling functions or operators are disabled.") +# endif + + // Report .length() type +# if GLM_CONFIG_LENGTH_TYPE == GLM_LENGTH_SIZE_T +# pragma message("GLM: GLM_FORCE_SIZE_T_LENGTH is defined. .length() returns a glm::length_t, a typedef of std::size_t.") +# else +# pragma message("GLM: GLM_FORCE_SIZE_T_LENGTH is undefined. .length() returns a glm::length_t, a typedef of int following GLSL.") +# endif + +# if GLM_CONFIG_UNRESTRICTED_GENTYPE == GLM_ENABLE +# pragma message("GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is defined. Removes GLSL restrictions on valid function genTypes.") +# else +# pragma message("GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is undefined. Follows strictly GLSL on valid function genTypes.") +# endif + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# pragma message("GLM: GLM_FORCE_SILENT_WARNINGS is defined. Ignores C++ warnings from using C++ language extensions.") +# else +# pragma message("GLM: GLM_FORCE_SILENT_WARNINGS is undefined. Shows C++ warnings from using C++ language extensions.") +# endif + +# ifdef GLM_FORCE_SINGLE_ONLY +# pragma message("GLM: GLM_FORCE_SINGLE_ONLY is defined. Using only single precision floating-point types.") +# endif + +# if defined(GLM_FORCE_ALIGNED_GENTYPES) && (GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE) +# undef GLM_FORCE_ALIGNED_GENTYPES +# pragma message("GLM: GLM_FORCE_ALIGNED_GENTYPES is defined, allowing aligned types. This prevents the use of C++ constexpr.") +# elif defined(GLM_FORCE_ALIGNED_GENTYPES) && (GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE) +# undef GLM_FORCE_ALIGNED_GENTYPES +# pragma message("GLM: GLM_FORCE_ALIGNED_GENTYPES is defined but is disabled. It requires C++11 and language extensions.") +# endif + +# if defined(GLM_FORCE_DEFAULT_ALIGNED_GENTYPES) +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE +# undef GLM_FORCE_DEFAULT_ALIGNED_GENTYPES +# pragma message("GLM: GLM_FORCE_DEFAULT_ALIGNED_GENTYPES is defined but is disabled. It requires C++11 and language extensions.") +# elif GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE +# pragma message("GLM: GLM_FORCE_DEFAULT_ALIGNED_GENTYPES is defined. All gentypes (e.g. vec3) will be aligned and padded by default.") +# endif +# endif + +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT +# pragma message("GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is defined. Using zero to one depth clip space.") +# else +# pragma message("GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is undefined. Using negative one to one depth clip space.") +# endif + +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT +# pragma message("GLM: GLM_FORCE_LEFT_HANDED is defined. Using left handed coordinate system.") +# else +# pragma message("GLM: GLM_FORCE_LEFT_HANDED is undefined. 
Using right handed coordinate system.")
+#	endif
+#endif//GLM_MESSAGES
+
+#endif//GLM_SETUP_INCLUDED
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_float.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_float.hpp
new file mode 100644
index 000000000000..c8037ebd7aa2
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_float.hpp
@@ -0,0 +1,68 @@
+#pragma once
+
+#include "setup.hpp"
+
+#if GLM_COMPILER == GLM_COMPILER_VC12
+#	pragma warning(push)
+#	pragma warning(disable: 4512) // assignment operator could not be generated
+#endif
+
+namespace glm{
+namespace detail
+{
+	template <typename T>
+	union float_t
+	{};
+
+	// https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+	template <>
+	union float_t<float>
+	{
+		typedef int int_type;
+		typedef float float_type;
+
+		GLM_CONSTEXPR float_t(float_type Num = 0.0f) : f(Num) {}
+
+		GLM_CONSTEXPR float_t& operator=(float_t const& x)
+		{
+			f = x.f;
+			return *this;
+		}
+
+		// Portable extraction of components.
+		GLM_CONSTEXPR bool negative() const { return i < 0; }
+		GLM_CONSTEXPR int_type mantissa() const { return i & ((1 << 23) - 1); }
+		GLM_CONSTEXPR int_type exponent() const { return (i >> 23) & ((1 << 8) - 1); }
+
+		int_type i;
+		float_type f;
+	};
+
+	template <>
+	union float_t<double>
+	{
+		typedef detail::int64 int_type;
+		typedef double float_type;
+
+		GLM_CONSTEXPR float_t(float_type Num = static_cast<float_type>(0)) : f(Num) {}
+
+		GLM_CONSTEXPR float_t& operator=(float_t const& x)
+		{
+			f = x.f;
+			return *this;
+		}
+
+		// Portable extraction of components.
+		GLM_CONSTEXPR bool negative() const { return i < 0; }
+		GLM_CONSTEXPR int_type mantissa() const { return i & ((int_type(1) << 52) - 1); }
+		GLM_CONSTEXPR int_type exponent() const { return (i >> 52) & ((int_type(1) << 11) - 1); }
+
+		int_type i;
+		float_type f;
+	};
+}//namespace detail
+}//namespace glm
+
+#if GLM_COMPILER == GLM_COMPILER_VC12
+#	pragma warning(pop)
+#endif
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_half.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_half.hpp
new file mode 100644
index 000000000000..40b8bec00d34
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_half.hpp
@@ -0,0 +1,16 @@
+#pragma once
+
+#include "setup.hpp"
+
+namespace glm{
+namespace detail
+{
+	typedef short hdata;
+
+	GLM_FUNC_DECL float toFloat32(hdata value);
+	GLM_FUNC_DECL hdata toFloat16(float const& value);
+
+}//namespace detail
+}//namespace glm
+
+#include "type_half.inl"
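The `float_t` specializations above pull the sign, exponent, and mantissa out of an IEEE-754 value by aliasing it with a same-sized integer through a union, the same type punning the header itself relies on (well-defined in practice on the compilers GLM targets). A standalone sketch of that extraction for a 32-bit float (illustrative only, not part of the patch):

// float_bits_demo.cpp -- standalone sketch of the component extraction above.
#include <cstdio>

union float_bits {
    float f;
    int   i; // same union punning the GLM header uses
};

int main() {
    float_bits x;
    x.f = -6.5f; // -1.625 * 2^2
    bool negative = x.i < 0;
    int mantissa = x.i & ((1 << 23) - 1);        // low 23 bits
    int exponent = (x.i >> 23) & ((1 << 8) - 1); // biased 8-bit exponent
    std::printf("sign=%d exponent=%d mantissa=0x%06x\n", negative, exponent, mantissa);
    // prints: sign=1 exponent=129 mantissa=0x500000  (bias 127 + 2 = 129)
    return 0;
}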
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_half.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_half.inl
new file mode 100644
index 000000000000..5d239cf22c25
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_half.inl
@@ -0,0 +1,241 @@
+namespace glm{
+namespace detail
+{
+	GLM_FUNC_QUALIFIER float overflow()
+	{
+		volatile float f = 1e10;
+
+		for(int i = 0; i < 10; ++i)
+			f = f * f; // this will overflow before the for loop terminates
+		return f;
+	}
+
+	union uif32
+	{
+		GLM_FUNC_QUALIFIER uif32() :
+			i(0)
+		{}
+
+		GLM_FUNC_QUALIFIER uif32(float f_) :
+			f(f_)
+		{}
+
+		GLM_FUNC_QUALIFIER uif32(unsigned int i_) :
+			i(i_)
+		{}
+
+		float f;
+		unsigned int i;
+	};
+
+	GLM_FUNC_QUALIFIER float toFloat32(hdata value)
+	{
+		int s = (value >> 15) & 0x00000001;
+		int e = (value >> 10) & 0x0000001f;
+		int m = value & 0x000003ff;
+
+		if(e == 0)
+		{
+			if(m == 0)
+			{
+				//
+				// Plus or minus zero
+				//
+
+				detail::uif32 result;
+				result.i = static_cast<unsigned int>(s << 31);
+				return result.f;
+			}
+			else
+			{
+				//
+				// Denormalized number -- renormalize it
+				//
+
+				while(!(m & 0x00000400))
+				{
+					m <<= 1;
+					e -= 1;
+				}
+
+				e += 1;
+				m &= ~0x00000400;
+			}
+		}
+		else if(e == 31)
+		{
+			if(m == 0)
+			{
+				//
+				// Positive or negative infinity
+				//
+
+				uif32 result;
+				result.i = static_cast<unsigned int>((s << 31) | 0x7f800000);
+				return result.f;
+			}
+			else
+			{
+				//
+				// Nan -- preserve sign and significand bits
+				//
+
+				uif32 result;
+				result.i = static_cast<unsigned int>((s << 31) | 0x7f800000 | (m << 13));
+				return result.f;
+			}
+		}
+
+		//
+		// Normalized number
+		//
+
+		e = e + (127 - 15);
+		m = m << 13;
+
+		//
+		// Assemble s, e and m.
+		//
+
+		uif32 Result;
+		Result.i = static_cast<unsigned int>((s << 31) | (e << 23) | m);
+		return Result.f;
+	}
+
+	GLM_FUNC_QUALIFIER hdata toFloat16(float const& f)
+	{
+		uif32 Entry;
+		Entry.f = f;
+		int i = static_cast<int>(Entry.i);
+
+		//
+		// Our floating point number, f, is represented by the bit
+		// pattern in integer i. Disassemble that bit pattern into
+		// the sign, s, the exponent, e, and the significand, m.
+		// Shift s into the position where it will go in the
+		// resulting half number.
+		// Adjust e, accounting for the different exponent bias
+		// of float and half (127 versus 15).
+		//
+
+		int s = (i >> 16) & 0x00008000;
+		int e = ((i >> 23) & 0x000000ff) - (127 - 15);
+		int m = i & 0x007fffff;
+
+		//
+		// Now reassemble s, e and m into a half:
+		//
+
+		if(e <= 0)
+		{
+			if(e < -10)
+			{
+				//
+				// E is less than -10. The absolute value of f is
+				// less than half_MIN (f may be a small normalized
+				// float, a denormalized float or a zero).
+				//
+				// We convert f to a half zero.
+				//
+
+				return hdata(s);
+			}
+
+			//
+			// E is between -10 and 0. F is a normalized float,
+			// whose magnitude is less than __half_NRM_MIN.
+			//
+			// We convert f to a denormalized half.
+			//
+
+			m = (m | 0x00800000) >> (1 - e);
+
+			//
+			// Round to nearest, round "0.5" up.
+			//
+			// Rounding may cause the significand to overflow and make
+			// our number normalized. Because of the way a half's bits
+			// are laid out, we don't have to treat this case separately;
+			// the code below will handle it correctly.
+			//
+
+			if(m & 0x00001000)
+				m += 0x00002000;
+
+			//
+			// Assemble the half from s, e (zero) and m.
+			//
+
+			return hdata(s | (m >> 13));
+		}
+		else if(e == 0xff - (127 - 15))
+		{
+			if(m == 0)
+			{
+				//
+				// F is an infinity; convert f to a half
+				// infinity with the same sign as f.
+				//
+
+				return hdata(s | 0x7c00);
+			}
+			else
+			{
+				//
+				// F is a NAN; we produce a half NAN that preserves
+				// the sign bit and the 10 leftmost bits of the
+				// significand of f, with one exception: If the 10
+				// leftmost bits are all zero, the NAN would turn
+				// into an infinity, so we have to set at least one
+				// bit in the significand.
+				//
+
+				m >>= 13;
+
+				return hdata(s | 0x7c00 | m | (m == 0));
+			}
+		}
+		else
+		{
+			//
+			// E is greater than zero. F is a normalized float.
+			// We try to convert f to a normalized half.
+			//
+
+			//
+			// Round to nearest, round "0.5" up
+			//
+
+			if(m & 0x00001000)
+			{
+				m += 0x00002000;
+
+				if(m & 0x00800000)
+				{
+					m = 0; // overflow in significand,
+					e += 1; // adjust exponent
+				}
+			}
+
+			//
+			// Handle exponent overflow
+			//
+
+			if (e > 30)
+			{
+				overflow(); // Cause a hardware floating point overflow;
+
+				return hdata(s | 0x7c00);
+				// if this returns, the half becomes an
+			} // infinity with the same sign as f.
+
+			//
+			// Assemble the half from s, e and m.
+			//
+
+			return hdata(s | (e << 10) | (m >> 13));
+		}
+	}
+
+}//namespace detail
+}//namespace glm
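A hypothetical round trip through the two conversions defined above, assuming the vendored glm directory is on the include path so `<glm/detail/type_half.hpp>` resolves; 1.5f encodes as sign 0, biased exponent 15, and a single leading mantissa bit, i.e. half bits 0x3E00:

// half_roundtrip_demo.cpp -- usage sketch for toFloat16/toFloat32 above.
// Assumes: -I points at the vendored glm root (an assumption, not part of the patch).
#include <cstdio>
#include <glm/detail/type_half.hpp>

int main() {
    glm::detail::hdata h = glm::detail::toFloat16(1.5f);
    // 1.5 = 1.1b * 2^0 -> sign 0, exponent 15 (0x3C00), mantissa bit 0x200 -> 0x3E00
    std::printf("half bits: 0x%04x\n", static_cast<unsigned short>(h) & 0xffffu);
    std::printf("back to float: %f\n", glm::detail::toFloat32(h)); // 1.500000
    return 0;
}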
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.hpp
new file mode 100644
index 000000000000..a61bded4b7ae
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.hpp
@@ -0,0 +1,177 @@
+/// @ref core
+/// @file glm/detail/type_mat2x2.hpp
+
+#pragma once
+
+#include "type_vec2.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+	template<typename T, qualifier Q>
+	struct mat<2, 2, T, Q>
+	{
+		typedef vec<2, T, Q> col_type;
+		typedef vec<2, T, Q> row_type;
+		typedef mat<2, 2, T, Q> type;
+		typedef mat<2, 2, T, Q> transpose_type;
+		typedef T value_type;
+
+	private:
+		col_type value[2];
+
+	public:
+		// -- Accesses --
+
+		typedef length_t length_type;
+		GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; }
+
+		GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT;
+		GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT;
+
+		// -- Constructors --
+
+		GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+		template<qualifier P>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 2, T, P> const& m);
+
+		GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			T const& x1, T const& y1,
+			T const& x2, T const& y2);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			col_type const& v1,
+			col_type const& v2);
+
+		// -- Conversions --
+
+		template<typename U, typename V, typename M, typename N>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			U const& x1, V const& y1,
+			M const& x2, N const& y2);
+
+		template<typename U, typename V>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			vec<2, U, Q> const& v1,
+			vec<2, V, Q> const& v2);
+
+		// -- Matrix conversions --
+
+		template<typename U, qualifier P>
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, U, P> const& m);
+
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+
+		// -- Unary arithmetic operators --
+
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator=(mat<2, 2, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator+=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator+=(mat<2, 2, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator-=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator-=(mat<2, 2, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator*=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator*=(mat<2, 2, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator/=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator/=(mat<2, 2, U, Q> const& m);
+
+		// -- Increment and decrement operators --
+
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator++ ();
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator-- ();
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator++(int);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator--(int);
+	};
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.hpp
new file mode 100644
index 000000000000..a61bded4b7ae
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.hpp
@@ -0,0 +1,177 @@
+/// @ref core
+/// @file glm/detail/type_mat2x2.hpp
+
+#pragma once
+
+#include "type_vec2.hpp"
+#include <limits>
+#include <cstddef>
+
+namespace glm
+{
+	template<typename T, qualifier Q>
+	struct mat<2, 2, T, Q>
+	{
+		typedef vec<2, T, Q> col_type;
+		typedef vec<2, T, Q> row_type;
+		typedef mat<2, 2, T, Q> type;
+		typedef mat<2, 2, T, Q> transpose_type;
+		typedef T value_type;
+
+	private:
+		col_type value[2];
+
+	public:
+		// -- Accesses --
+
+		typedef length_t length_type;
+		GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; }
+
+		GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT;
+		GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT;
+
+		// -- Constructors --
+
+		GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+		template<qualifier P>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 2, T, P> const& m);
+
+		GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			T const& x1, T const& y1,
+			T const& x2, T const& y2);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			col_type const& v1,
+			col_type const& v2);
+
+		// -- Conversions --
+
+		template<typename U, typename V, typename M, typename N>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			U const& x1, V const& y1,
+			M const& x2, N const& y2);
+
+		template<typename U, typename V>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			vec<2, U, Q> const& v1,
+			vec<2, V, Q> const& v2);
+
+		// -- Matrix conversions --
+
+		template<typename U, qualifier P>
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, U, P> const& m);
+
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+
+		// -- Unary arithmetic operators --
+
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator=(mat<2, 2, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator+=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator+=(mat<2, 2, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator-=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator-=(mat<2, 2, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator*=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator*=(mat<2, 2, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator/=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator/=(mat<2, 2, U, Q> const& m);
+
+		// -- Increment and decrement operators --
+
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator++ ();
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator-- ();
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator++(int);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator--(int);
+	};
+
+	// -- Unary operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m);
+
+	// -- Binary operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m, T scalar);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator+(T scalar, mat<2, 2, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m, T scalar);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator-(T scalar, mat<2, 2, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m, T scalar);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator*(T scalar, mat<2, 2, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type operator*(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 2, T, Q>::row_type operator*(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m, T scalar);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator/(T scalar, mat<2, 2, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type operator/(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 2, T, Q>::row_type operator/(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+	// -- Boolean operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
+} //namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat2x2.inl"
+#endif
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.inl
new file mode 100644
index 000000000000..88eca20b4040
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x2.inl
@@ -0,0 +1,536 @@
+#include "../matrix.hpp"
+
+namespace glm
+{
+	// -- Constructors --
+
+#	if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+		template<typename T, qualifier Q>
+		GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat()
+#			if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
+				: value{col_type(1, 0), col_type(0, 1)}
+#			endif
+		{
+#			if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
+				this->value[0] = col_type(1, 0);
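+				// GLM matrices are stored column-major: value[0] and value[1]
+				// hold the two columns, so this pair of assignments builds the
+				// 2x2 identity.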
this->value[1] = col_type(0, 1); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 2, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(T scalar) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(scalar, 0), col_type(0, scalar)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(scalar, 0); + this->value[1] = col_type(0, scalar); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat + ( + T const& x0, T const& y0, + T const& x1, T const& y1 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0), col_type(x1, y1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0); + this->value[1] = col_type(x1, y1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(col_type const& v0, col_type const& v1) +# if GLM_HAS_INITIALIZER_LISTS + : value{v0, v1} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; +# endif + } + + // -- Conversion constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat + ( + X1 const& x1, Y1 const& y1, + X2 const& x2, Y2 const& y2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(static_cast(x1), value_type(y1)), col_type(static_cast(x2), value_type(y2)) } +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(static_cast(x1), value_type(y1)); + this->value[1] = col_type(static_cast(x2), value_type(y2)); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); +# endif + } + + // -- mat2x2 matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 2, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = 
col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type& mat<2, 2, T, Q>::operator[](typename mat<2, 2, T, Q>::length_type i) GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type const& mat<2, 2, T, Q>::operator[](typename mat<2, 2, T, Q>::length_type i) const GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator=(mat<2, 2, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator+=(U scalar) + { + this->value[0] += scalar; + this->value[1] += scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator+=(mat<2, 2, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator-=(U scalar) + { + this->value[0] -= scalar; + this->value[1] -= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator-=(mat<2, 2, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator*=(U scalar) + { + this->value[0] *= scalar; + this->value[1] *= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator*=(mat<2, 2, U, Q> const& m) + { + return (*this = *this * m); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator/=(U scalar) + { + this->value[0] /= scalar; + this->value[1] /= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator/=(mat<2, 2, U, Q> const& m) + { + return *this *= inverse(m); + } + + // -- Increment and decrement 
operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> mat<2, 2, T, Q>::operator++(int) + { + mat<2, 2, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> mat<2, 2, T, Q>::operator--(int) + { + mat<2, 2, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + -m[0], + -m[1]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m, T scalar) + { + return mat<2, 2, T, Q>( + m[0] + scalar, + m[1] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator+(T scalar, mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + m[0] + scalar, + m[1] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return mat<2, 2, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m, T scalar) + { + return mat<2, 2, T, Q>( + m[0] - scalar, + m[1] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator-(T scalar, mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + scalar - m[0], + scalar - m[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return mat<2, 2, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m, T scalar) + { + return mat<2, 2, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator*(T scalar, mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type operator* + ( + mat<2, 2, T, Q> const& m, + typename mat<2, 2, T, Q>::row_type const& v + ) + { + return vec<2, T, Q>( + m[0][0] * v.x + m[1][0] * v.y, + m[0][1] * v.x + m[1][1] * v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::row_type operator* + ( + typename mat<2, 2, T, Q>::col_type const& v, + mat<2, 2, T, Q> const& m + ) + { + return vec<2, T, Q>( + v.x * m[0][0] + v.y * m[0][1], + v.x * m[1][0] + v.y * m[1][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return mat<2, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return mat<3, 2, T, Q>( + m1[0][0] * m2[0][0] 
+ m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return mat<4, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m, T scalar) + { + return mat<2, 2, T, Q>( + m[0] / scalar, + m[1] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator/(T scalar, mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + scalar / m[0], + scalar / m[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type operator/(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v) + { + return inverse(m) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::row_type operator/(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m) + { + return v * inverse(m); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + mat<2, 2, T, Q> m1_copy(m1); + return m1_copy /= m2; + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]); + } +} //namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x3.hpp new file mode 100644 index 000000000000..21015f485d15 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x3.hpp @@ -0,0 +1,159 @@ +/// @ref core +/// @file glm/detail/type_mat2x3.hpp + +#pragma once + +#include "type_vec2.hpp" +#include "type_vec3.hpp" +#include +#include + +namespace glm +{ + template + struct mat<2, 3, T, Q> + { + typedef vec<3, T, Q> col_type; + typedef vec<2, T, Q> row_type; + typedef mat<2, 3, T, Q> type; + typedef mat<3, 2, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[2]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 3, T, P> const& m); + + GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + T x0, T y0, T z0, + T x1, T y1, T z1); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + col_type const& v0, + col_type const& v1); + + // -- Conversions -- + + template + 
GLM_FUNC_DECL GLM_CONSTEXPR mat( + X1 x1, Y1 y1, Z1 z1, + X2 x2, Y2 y2, Z2 z2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat( + vec<3, U, Q> const& v1, + vec<3, V, Q> const& v2); + + // -- Matrix conversions -- + + template + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, U, P> const& m); + + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator=(mat<2, 3, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator+=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator+=(mat<2, 3, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator-=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator-=(mat<2, 3, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator*=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator++ (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator-- (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator*(T scalar, mat<2, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type operator*(mat<2, 3, T, Q> const& m, typename mat<2, 3, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 3, T, Q>::row_type operator*(typename mat<2, 3, T, Q>::col_type const& v, mat<2, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<2, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator/(mat<2, 3, T, Q> const& m, T 
scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator/(T scalar, mat<2, 3, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat2x3.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x3.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x3.inl new file mode 100644 index 000000000000..88077518f9f0 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x3.inl @@ -0,0 +1,510 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0), col_type(0, 1, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0); + this->value[1] = col_type(0, 1, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 3, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m.value[0]; + this->value[1] = m.value[1]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(T scalar) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(scalar, 0, 0), col_type(0, scalar, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(scalar, 0, 0); + this->value[1] = col_type(0, scalar, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat + ( + T x0, T y0, T z0, + T x1, T y1, T z1 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0, z0), col_type(x1, y1, z1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0); + this->value[1] = col_type(x1, y1, z1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(col_type const& v0, col_type const& v1) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v0); + this->value[1] = col_type(v1); +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X1, typename Y1, typename Z1, + typename X2, typename Y2, typename Z2> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat + ( + X1 x1, Y1 y1, Z1 z1, + X2 x2, Y2 y2, Z2 z2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x1, y1, z1), col_type(x2, y2, z2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x1, y1, z1); + this->value[1] = col_type(x2, y2, z2); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 3, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = 
col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type & mat<2, 3, T, Q>::operator[](typename mat<2, 3, T, Q>::length_type i) GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type const& mat<2, 3, T, Q>::operator[](typename mat<2, 3, T, Q>::length_type i) const GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator=(mat<2, 3, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator+=(mat<2, 3, U, Q> const& m) + { + this->value[0] += 
m[0]; + this->value[1] += m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator-=(mat<2, 3, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> mat<2, 3, T, Q>::operator++(int) + { + mat<2, 3, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> mat<2, 3, T, Q>::operator--(int) + { + mat<2, 3, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m) + { + return mat<2, 3, T, Q>( + -m[0], + -m[1]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m, T scalar) + { + return mat<2, 3, T, Q>( + m[0] + scalar, + m[1] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return mat<2, 3, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m, T scalar) + { + return mat<2, 3, T, Q>( + m[0] - scalar, + m[1] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return mat<2, 3, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m, T scalar) + { + return mat<2, 3, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator*(T scalar, mat<2, 3, T, Q> const& m) + { + return mat<2, 3, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type operator* + ( + mat<2, 3, T, Q> const& m, + typename mat<2, 3, T, Q>::row_type const& v) + { + return typename mat<2, 3, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y, + m[0][1] * v.x + m[1][1] * v.y, + m[0][2] * v.x + m[1][2] * v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::row_type operator* + ( + typename mat<2, 3, T, Q>::col_type const& v, + mat<2, 3, T, Q> const& m) + { + return typename mat<2, 3, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2], 
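+			// (v * M with v as a row vector: component j is dot(v, m[j]), i.e. v dotted with column j)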
+ v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return mat<2, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + T SrcA00 = m1[0][0]; + T SrcA01 = m1[0][1]; + T SrcA02 = m1[0][2]; + T SrcA10 = m1[1][0]; + T SrcA11 = m1[1][1]; + T SrcA12 = m1[1][2]; + + T SrcB00 = m2[0][0]; + T SrcB01 = m2[0][1]; + T SrcB10 = m2[1][0]; + T SrcB11 = m2[1][1]; + T SrcB20 = m2[2][0]; + T SrcB21 = m2[2][1]; + + mat<3, 3, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01; + Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11; + Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11; + Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21; + Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21; + Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return mat<4, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1], + m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator/(mat<2, 3, T, Q> const& m, T scalar) + { + return mat<2, 3, T, Q>( + m[0] / scalar, + m[1] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator/(T scalar, mat<2, 3, T, Q> const& m) + { + return mat<2, 3, T, Q>( + scalar / m[0], + scalar / m[1]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]); + } +} //namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x4.hpp new file mode 100644 index 000000000000..ee6dc68574e5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x4.hpp @@ -0,0 +1,161 @@ +/// @ref core +/// @file glm/detail/type_mat2x4.hpp + +#pragma once + +#include "type_vec2.hpp" +#include "type_vec4.hpp" +#include +#include + +namespace glm +{ + template + struct mat<2, 4, T, Q> + { + typedef vec<4, T, Q> col_type; + typedef vec<2, T, Q> row_type; + typedef mat<2, 4, T, Q> type; + typedef mat<4, 2, T, Q> transpose_type; + typedef T value_type; + + private: + col_type 
value[2]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 4, T, P> const& m); + + GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + T x0, T y0, T z0, T w0, + T x1, T y1, T z1, T w1); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + col_type const& v0, + col_type const& v1); + + // -- Conversions -- + + template< + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2> + GLM_FUNC_DECL GLM_CONSTEXPR mat( + X1 x1, Y1 y1, Z1 z1, W1 w1, + X2 x2, Y2 y2, Z2 z2, W2 w2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat( + vec<4, U, Q> const& v1, + vec<4, V, Q> const& v2); + + // -- Matrix conversions -- + + template + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, U, P> const& m); + + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator=(mat<2, 4, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator+=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator+=(mat<2, 4, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator-=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator-=(mat<2, 4, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator*=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator++ (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator-- (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m, T scalar); + + template + 
GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator*(T scalar, mat<2, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type operator*(mat<2, 4, T, Q> const& m, typename mat<2, 4, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 4, T, Q>::row_type operator*(typename mat<2, 4, T, Q>::col_type const& v, mat<2, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<2, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator/(mat<2, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator/(T scalar, mat<2, 4, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat2x4.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x4.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x4.inl new file mode 100644 index 000000000000..e70753c06a3a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat2x4.inl @@ -0,0 +1,520 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0, 0); + this->value[1] = col_type(0, 1, 0, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 4, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0, 0, 0); + this->value[1] = col_type(0, s, 0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat + ( + T x0, T y0, T z0, T w0, + T x1, T y1, T z1, T w1 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0, z0, w0), col_type(x1, y1, z1, w1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0, w0); + this->value[1] = col_type(x1, y1, z1, w1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(col_type const& v0, col_type const& v1) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR 
mat<2, 4, T, Q>::mat + ( + X1 x1, Y1 y1, Z1 z1, W1 w1, + X2 x2, Y2 y2, Z2 z2, W2 w2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{ + col_type(x1, y1, z1, w1), + col_type(x2, y2, z2, w2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x1, y1, z1, w1); + this->value[1] = col_type(x2, y2, z2, w2); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(vec<4, V1, Q> const& v1, vec<4, V2, Q> const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 4, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER 
GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type & mat<2, 4, T, Q>::operator[](typename mat<2, 4, T, Q>::length_type i) GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type const& mat<2, 4, T, Q>::operator[](typename mat<2, 4, T, Q>::length_type i) const GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator=(mat<2, 4, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator+=(mat<2, 4, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator-=(mat<2, 4, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> & mat<2, 4, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> mat<2, 4, T, Q>::operator++(int) + { + mat<2, 4, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> mat<2, 4, T, Q>::operator--(int) + { + mat<2, 4, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m) + { + return mat<2, 4, T, Q>( + -m[0], + -m[1]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m, T scalar) + { + return mat<2, 4, T, Q>( + m[0] + scalar, + m[1] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return mat<2, 4, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m, T scalar) + { + return mat<2, 4, T, Q>( + m[0] - scalar, + m[1] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return 
mat<2, 4, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m, T scalar) + { + return mat<2, 4, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator*(T scalar, mat<2, 4, T, Q> const& m) + { + return mat<2, 4, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type operator*(mat<2, 4, T, Q> const& m, typename mat<2, 4, T, Q>::row_type const& v) + { + return typename mat<2, 4, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y, + m[0][1] * v.x + m[1][1] * v.y, + m[0][2] * v.x + m[1][2] * v.y, + m[0][3] * v.x + m[1][3] * v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 4, T, Q>::row_type operator*(typename mat<2, 4, T, Q>::col_type const& v, mat<2, 4, T, Q> const& m) + { + return typename mat<2, 4, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2] + v.w * m[0][3], + v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2] + v.w * m[1][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + T SrcA00 = m1[0][0]; + T SrcA01 = m1[0][1]; + T SrcA02 = m1[0][2]; + T SrcA03 = m1[0][3]; + T SrcA10 = m1[1][0]; + T SrcA11 = m1[1][1]; + T SrcA12 = m1[1][2]; + T SrcA13 = m1[1][3]; + + T SrcB00 = m2[0][0]; + T SrcB01 = m2[0][1]; + T SrcB10 = m2[1][0]; + T SrcB11 = m2[1][1]; + T SrcB20 = m2[2][0]; + T SrcB21 = m2[2][1]; + T SrcB30 = m2[3][0]; + T SrcB31 = m2[3][1]; + + mat<4, 4, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01; + Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01; + Result[0][3] = SrcA03 * SrcB00 + SrcA13 * SrcB01; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11; + Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11; + Result[1][3] = SrcA03 * SrcB10 + SrcA13 * SrcB11; + Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21; + Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21; + Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21; + Result[2][3] = SrcA03 * SrcB20 + SrcA13 * SrcB21; + Result[3][0] = SrcA00 * SrcB30 + SrcA10 * SrcB31; + Result[3][1] = SrcA01 * SrcB30 + SrcA11 * SrcB31; + Result[3][2] = SrcA02 * SrcB30 + SrcA12 * SrcB31; + Result[3][3] = SrcA03 * SrcB30 + SrcA13 * SrcB31; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return mat<2, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return mat<3, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1], + m1[0][0] * 
m2[2][0] + m1[1][0] * m2[2][1], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1], + m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator/(mat<2, 4, T, Q> const& m, T scalar) + { + return mat<2, 4, T, Q>( + m[0] / scalar, + m[1] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator/(T scalar, mat<2, 4, T, Q> const& m) + { + return mat<2, 4, T, Q>( + scalar / m[0], + scalar / m[1]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]); + } +} //namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x2.hpp new file mode 100644 index 000000000000..4fe521aa89a0 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x2.hpp @@ -0,0 +1,167 @@ +/// @ref core +/// @file glm/detail/type_mat3x2.hpp + +#pragma once + +#include "type_vec2.hpp" +#include "type_vec3.hpp" +#include +#include + +namespace glm +{ + template + struct mat<3, 2, T, Q> + { + typedef vec<2, T, Q> col_type; + typedef vec<3, T, Q> row_type; + typedef mat<3, 2, T, Q> type; + typedef mat<2, 3, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[3]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 2, T, P> const& m); + + GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + T x0, T y0, + T x1, T y1, + T x2, T y2); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + col_type const& v0, + col_type const& v1, + col_type const& v2); + + // -- Conversions -- + + template< + typename X1, typename Y1, + typename X2, typename Y2, + typename X3, typename Y3> + GLM_FUNC_DECL GLM_CONSTEXPR mat( + X1 x1, Y1 y1, + X2 x2, Y2 y2, + X3 x3, Y3 y3); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat( + vec<2, V1, Q> const& v1, + vec<2, V2, Q> const& v2, + vec<2, V3, Q> const& v3); + + // -- Matrix conversions -- + + template + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, U, P> const& m); + + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator=(mat<3, 2, U, Q> const& m); + template + 
GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator+=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator+=(mat<3, 2, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator-=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator-=(mat<3, 2, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator*=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator++ (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator-- (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator*(T scalar, mat<3, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type operator*(mat<3, 2, T, Q> const& m, typename mat<3, 2, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 2, T, Q>::row_type operator*(typename mat<3, 2, T, Q>::col_type const& v, mat<3, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<4, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator/(mat<3, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator/(T scalar, mat<3, 2, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat3x2.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x2.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x2.inl new file mode 100644 index 000000000000..5a1b4c010a04 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x2.inl @@ -0,0 +1,532 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0), col_type(0, 1), col_type(0, 
0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0); + this->value[1] = col_type(0, 1); + this->value[2] = col_type(0, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 2, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0), col_type(0, s), col_type(0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0); + this->value[1] = col_type(0, s); + this->value[2] = col_type(0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat + ( + T x0, T y0, + T x1, T y1, + T x2, T y2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0); + this->value[1] = col_type(x1, y1); + this->value[2] = col_type(x2, y2); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; + this->value[2] = v2; +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X0, typename Y0, + typename X1, typename Y1, + typename X2, typename Y2> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat + ( + X0 x0, Y0 y0, + X1 x1, Y1 y1, + X2 x2, Y2 y2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0); + this->value[1] = col_type(x1, y1); + this->value[2] = col_type(x2, y2); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(vec<2, V0, Q> const& v0, vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v0); + this->value[1] = col_type(v1); + this->value[2] = col_type(v2); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 2, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# 
endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type & mat<3, 2, T, Q>::operator[](typename mat<3, 2, T, Q>::length_type i) GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type const& mat<3, 2, T, Q>::operator[](typename mat<3, 2, T, Q>::length_type i) const GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator=(mat<3, 2, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator+=(mat<3, 2, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, 
Q>::operator-=(mat<3, 2, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> & mat<3, 2, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> mat<3, 2, T, Q>::operator++(int) + { + mat<3, 2, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> mat<3, 2, T, Q>::operator--(int) + { + mat<3, 2, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m) + { + return mat<3, 2, T, Q>( + -m[0], + -m[1], + -m[2]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m, T scalar) + { + return mat<3, 2, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return mat<3, 2, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m, T scalar) + { + return mat<3, 2, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return mat<3, 2, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m, T scalar) + { + return mat<3, 2, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator*(T scalar, mat<3, 2, T, Q> const& m) + { + return mat<3, 2, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type operator*(mat<3, 2, T, Q> const& m, typename mat<3, 2, T, Q>::row_type const& v) + { + return typename mat<3, 2, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, + m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::row_type operator*(typename mat<3, 2, T, Q>::col_type const& v, mat<3, 2, T, Q> const& m) + { + return typename mat<3, 2, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1], + v.x * m[1][0] + v.y * m[1][1], + v.x * m[2][0] + v.y * m[2][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, 
Q> operator*(mat<3, 2, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + const T SrcA00 = m1[0][0]; + const T SrcA01 = m1[0][1]; + const T SrcA10 = m1[1][0]; + const T SrcA11 = m1[1][1]; + const T SrcA20 = m1[2][0]; + const T SrcA21 = m1[2][1]; + + const T SrcB00 = m2[0][0]; + const T SrcB01 = m2[0][1]; + const T SrcB02 = m2[0][2]; + const T SrcB10 = m2[1][0]; + const T SrcB11 = m2[1][1]; + const T SrcB12 = m2[1][2]; + + mat<2, 2, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return mat<3, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return mat<4, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator/(mat<3, 2, T, Q> const& m, T scalar) + { + return mat<3, 2, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator/(T scalar, mat<3, 2, T, Q> const& m) + { + return mat<3, 2, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); + } +} //namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x3.hpp new file mode 100644 index 000000000000..fc7df9824a0f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x3.hpp @@ -0,0 +1,184 @@ +/// @ref core +/// @file glm/detail/type_mat3x3.hpp + +#pragma once + +#include "type_vec3.hpp" +#include +#include + +namespace glm +{ + template + struct mat<3, 3, T, Q> + { + typedef vec<3, T, Q> col_type; + typedef vec<3, T, Q> row_type; + typedef mat<3, 3, T, Q> type; + typedef mat<3, 3, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[3]; + + public: + // -- Accesses -- + + typedef 
length_t length_type;
+		GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; }
+
+		GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT;
+		GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT;
+
+		// -- Constructors --
+
+		GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+		template<qualifier P>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 3, T, P> const& m);
+
+		GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			T x0, T y0, T z0,
+			T x1, T y1, T z1,
+			T x2, T y2, T z2);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			col_type const& v0,
+			col_type const& v1,
+			col_type const& v2);
+
+		// -- Conversions --
+
+		template<
+			typename X1, typename Y1, typename Z1,
+			typename X2, typename Y2, typename Z2,
+			typename X3, typename Y3, typename Z3>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			X1 x1, Y1 y1, Z1 z1,
+			X2 x2, Y2 y2, Z2 z2,
+			X3 x3, Y3 y3, Z3 z3);
+
+		template<typename V1, typename V2, typename V3>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat(
+			vec<3, V1, Q> const& v1,
+			vec<3, V2, Q> const& v2,
+			vec<3, V3, Q> const& v3);
+
+		// -- Matrix conversions --
+
+		template<typename U, qualifier P>
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, U, P> const& m);
+
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
+
+		// -- Unary arithmetic operators --
+
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator=(mat<3, 3, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator+=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator+=(mat<3, 3, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator-=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator-=(mat<3, 3, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator*=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator*=(mat<3, 3, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator/=(U s);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator/=(mat<3, 3, U, Q> const& m);
+
+		// -- Increment and decrement operators --
+
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator++();
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator--();
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator++(int);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator--(int);
+	};
+
+	// -- Unary operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m);
+
+	// -- Binary operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m, T scalar);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator+(T scalar, mat<3, 3, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL
GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator-(T scalar, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator*(T scalar, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type operator*(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 3, T, Q>::row_type operator*(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator/(T scalar, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type operator/(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 3, T, Q>::row_type operator/(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat3x3.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x3.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x3.inl new file mode 100644 index 000000000000..ddb00d46bd23 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x3.inl @@ -0,0 +1,601 @@ +#include "../matrix.hpp" + +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0), col_type(0, 1, 0), col_type(0, 0, 1)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0); + this->value[1] = col_type(0, 1, 0); + this->value[2] = col_type(0, 0, 1); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 3, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0, 0), col_type(0, 
s, 0), col_type(0, 0, s)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0, 0); + this->value[1] = col_type(0, s, 0); + this->value[2] = col_type(0, 0, s); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat + ( + T x0, T y0, T z0, + T x1, T y1, T z1, + T x2, T y2, T z2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0); + this->value[1] = col_type(x1, y1, z1); + this->value[2] = col_type(x2, y2, z2); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v0); + this->value[1] = col_type(v1); + this->value[2] = col_type(v2); +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X1, typename Y1, typename Z1, + typename X2, typename Y2, typename Z2, + typename X3, typename Y3, typename Z3> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat + ( + X1 x1, Y1 y1, Z1 z1, + X2 x2, Y2 y2, Z2 z2, + X3 x3, Y3 y3, Z3 z3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x1, y1, z1); + this->value[1] = col_type(x2, y2, z2); + this->value[2] = col_type(x3, y3, z3); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2, vec<3, V3, Q> const& v3) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2), col_type(v3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); + this->value[2] = col_type(v3); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 3, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 2, T, Q> const& 
m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type & mat<3, 3, T, Q>::operator[](typename mat<3, 3, T, Q>::length_type i) GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type const& mat<3, 3, T, Q>::operator[](typename mat<3, 3, T, Q>::length_type i) const GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator=(mat<3, 3, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator+=(mat<3, 3, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator-=(mat<3, 3, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + return *this; + } + + template + template 
+ GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator*=(mat<3, 3, U, Q> const& m) + { + return (*this = *this * m); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator/=(mat<3, 3, U, Q> const& m) + { + return *this *= inverse(m); + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> mat<3, 3, T, Q>::operator++(int) + { + mat<3, 3, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> mat<3, 3, T, Q>::operator--(int) + { + mat<3, 3, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m) + { + return mat<3, 3, T, Q>( + -m[0], + -m[1], + -m[2]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m, T scalar) + { + return mat<3, 3, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator+(T scalar, mat<3, 3, T, Q> const& m) + { + return mat<3, 3, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return mat<3, 3, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m, T scalar) + { + return mat<3, 3, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator-(T scalar, mat<3, 3, T, Q> const& m) + { + return mat<3, 3, T, Q>( + scalar - m[0], + scalar - m[1], + scalar - m[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return mat<3, 3, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m, T scalar) + { + return mat<3, 3, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator*(T scalar, mat<3, 3, T, Q> const& m) + { + return mat<3, 3, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type operator*(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v) + { + return typename mat<3, 3, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, + m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z, + m[0][2] * v.x + 
m[1][2] * v.y + m[2][2] * v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::row_type operator*(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m) + { + return typename mat<3, 3, T, Q>::row_type( + m[0][0] * v.x + m[0][1] * v.y + m[0][2] * v.z, + m[1][0] * v.x + m[1][1] * v.y + m[1][2] * v.z, + m[2][0] * v.x + m[2][1] * v.y + m[2][2] * v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + T const SrcA00 = m1[0][0]; + T const SrcA01 = m1[0][1]; + T const SrcA02 = m1[0][2]; + T const SrcA10 = m1[1][0]; + T const SrcA11 = m1[1][1]; + T const SrcA12 = m1[1][2]; + T const SrcA20 = m1[2][0]; + T const SrcA21 = m1[2][1]; + T const SrcA22 = m1[2][2]; + + T const SrcB00 = m2[0][0]; + T const SrcB01 = m2[0][1]; + T const SrcB02 = m2[0][2]; + T const SrcB10 = m2[1][0]; + T const SrcB11 = m2[1][1]; + T const SrcB12 = m2[1][2]; + T const SrcB20 = m2[2][0]; + T const SrcB21 = m2[2][1]; + T const SrcB22 = m2[2][2]; + + mat<3, 3, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; + Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; + Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12; + Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22; + Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22; + Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return mat<2, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return mat<4, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2], + m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1] + m1[2][2] * m2[3][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m, T scalar) + { + return mat<3, 3, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator/(T scalar, mat<3, 
3, T, Q> const& m) + { + return mat<3, 3, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type operator/(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v) + { + return inverse(m) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::row_type operator/(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m) + { + return v * inverse(m); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + mat<3, 3, T, Q> m1_copy(m1); + return m1_copy /= m2; + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); + } +} //namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x4.hpp new file mode 100644 index 000000000000..e2617a32952b --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x4.hpp @@ -0,0 +1,166 @@ +/// @ref core +/// @file glm/detail/type_mat3x4.hpp + +#pragma once + +#include "type_vec3.hpp" +#include "type_vec4.hpp" +#include +#include + +namespace glm +{ + template + struct mat<3, 4, T, Q> + { + typedef vec<4, T, Q> col_type; + typedef vec<3, T, Q> row_type; + typedef mat<3, 4, T, Q> type; + typedef mat<4, 3, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[3]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 4, T, P> const& m); + + GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + T x0, T y0, T z0, T w0, + T x1, T y1, T z1, T w1, + T x2, T y2, T z2, T w2); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + col_type const& v0, + col_type const& v1, + col_type const& v2); + + // -- Conversions -- + + template< + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2, + typename X3, typename Y3, typename Z3, typename W3> + GLM_FUNC_DECL GLM_CONSTEXPR mat( + X1 x1, Y1 y1, Z1 z1, W1 w1, + X2 x2, Y2 y2, Z2 z2, W2 w2, + X3 x3, Y3 y3, Z3 z3, W3 w3); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat( + vec<4, V1, Q> const& v1, + vec<4, V2, Q> const& v2, + vec<4, V3, Q> const& v3); + + // -- Matrix conversions -- + + template + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, U, P> const& m); + + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT 
GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator=(mat<3, 4, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator+=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator+=(mat<3, 4, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator-=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator-=(mat<3, 4, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator*=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator++(); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator*(T scalar, mat<3, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type operator*(mat<3, 4, T, Q> const& m, typename mat<3, 4, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 4, T, Q>::row_type operator*(typename mat<3, 4, T, Q>::col_type const& v, mat<3, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<4, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator/(mat<3, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator/(T scalar, mat<3, 4, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat3x4.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x4.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x4.inl new file mode 100644 index 000000000000..5e2eccd85d9b --- /dev/null +++ 
b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat3x4.inl @@ -0,0 +1,578 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0), col_type(0, 0, 1, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0, 0); + this->value[1] = col_type(0, 1, 0, 0); + this->value[2] = col_type(0, 0, 1, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 4, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0), col_type(0, 0, s, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0, 0, 0); + this->value[1] = col_type(0, s, 0, 0); + this->value[2] = col_type(0, 0, s, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat + ( + T x0, T y0, T z0, T w0, + T x1, T y1, T z1, T w1, + T x2, T y2, T z2, T w2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{ + col_type(x0, y0, z0, w0), + col_type(x1, y1, z1, w1), + col_type(x2, y2, z2, w2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0, w0); + this->value[1] = col_type(x1, y1, z1, w1); + this->value[2] = col_type(x2, y2, z2, w2); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; + this->value[2] = v2; +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X0, typename Y0, typename Z0, typename W0, + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat + ( + X0 x0, Y0 y0, Z0 z0, W0 w0, + X1 x1, Y1 y1, Z1 z1, W1 w1, + X2 x2, Y2 y2, Z2 z2, W2 w2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{ + col_type(x0, y0, z0, w0), + col_type(x1, y1, z1, w1), + col_type(x2, y2, z2, w2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0, w0); + this->value[1] = col_type(x1, y1, z1, w1); + this->value[2] = col_type(x2, y2, z2, w2); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(vec<4, V1, Q> const& v0, vec<4, V2, Q> const& v1, vec<4, V3, Q> const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v0); + this->value[1] = col_type(v1); + this->value[2] = col_type(v2); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 4, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = 
col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(0, 0, 1, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(0, 0, 1, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(m[2], 1, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0, 0, 1, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(m[2], 1, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 0); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type & mat<3, 4, T, Q>::operator[](typename mat<3, 4, T, Q>::length_type i) GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type const& mat<3, 4, T, Q>::operator[](typename mat<3, 4, T, Q>::length_type i) const GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + // -- Unary updatable 
operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator=(mat<3, 4, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator+=(mat<3, 4, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator-=(mat<3, 4, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> & mat<3, 4, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> mat<3, 4, T, Q>::operator++(int) + { + mat<3, 4, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> mat<3, 4, T, Q>::operator--(int) + { + mat<3, 4, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m) + { + return mat<3, 4, T, Q>( + -m[0], + -m[1], + -m[2]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m, T scalar) + { + return mat<3, 4, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return mat<3, 4, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m, T scalar) + { + return mat<3, 4, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return mat<3, 4, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m, T scalar) + { + 
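+		// Scalar product: scales each of the three vec4 columns componentwise.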
return mat<3, 4, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator*(T scalar, mat<3, 4, T, Q> const& m) + { + return mat<3, 4, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type operator* + ( + mat<3, 4, T, Q> const& m, + typename mat<3, 4, T, Q>::row_type const& v + ) + { + return typename mat<3, 4, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, + m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z, + m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z, + m[0][3] * v.x + m[1][3] * v.y + m[2][3] * v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::row_type operator* + ( + typename mat<3, 4, T, Q>::col_type const& v, + mat<3, 4, T, Q> const& m + ) + { + return typename mat<3, 4, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2] + v.w * m[0][3], + v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2] + v.w * m[1][3], + v.x * m[2][0] + v.y * m[2][1] + v.z * m[2][2] + v.w * m[2][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + const T SrcA00 = m1[0][0]; + const T SrcA01 = m1[0][1]; + const T SrcA02 = m1[0][2]; + const T SrcA03 = m1[0][3]; + const T SrcA10 = m1[1][0]; + const T SrcA11 = m1[1][1]; + const T SrcA12 = m1[1][2]; + const T SrcA13 = m1[1][3]; + const T SrcA20 = m1[2][0]; + const T SrcA21 = m1[2][1]; + const T SrcA22 = m1[2][2]; + const T SrcA23 = m1[2][3]; + + const T SrcB00 = m2[0][0]; + const T SrcB01 = m2[0][1]; + const T SrcB02 = m2[0][2]; + const T SrcB10 = m2[1][0]; + const T SrcB11 = m2[1][1]; + const T SrcB12 = m2[1][2]; + const T SrcB20 = m2[2][0]; + const T SrcB21 = m2[2][1]; + const T SrcB22 = m2[2][2]; + const T SrcB30 = m2[3][0]; + const T SrcB31 = m2[3][1]; + const T SrcB32 = m2[3][2]; + + mat<4, 4, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; + Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02; + Result[0][3] = SrcA03 * SrcB00 + SrcA13 * SrcB01 + SrcA23 * SrcB02; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; + Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12; + Result[1][3] = SrcA03 * SrcB10 + SrcA13 * SrcB11 + SrcA23 * SrcB12; + Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22; + Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22; + Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22; + Result[2][3] = SrcA03 * SrcB20 + SrcA13 * SrcB21 + SrcA23 * SrcB22; + Result[3][0] = SrcA00 * SrcB30 + SrcA10 * SrcB31 + SrcA20 * SrcB32; + Result[3][1] = SrcA01 * SrcB30 + SrcA11 * SrcB31 + SrcA21 * SrcB32; + Result[3][2] = SrcA02 * SrcB30 + SrcA12 * SrcB31 + SrcA22 * SrcB32; + Result[3][3] = SrcA03 * SrcB30 + SrcA13 * SrcB31 + SrcA23 * SrcB32; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return mat<2, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2], + 
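+			// second column of the mat<2, 4> result (m1 applied to m2's column 1)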
m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return mat<3, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2], + m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1] + m1[2][3] * m2[2][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator/(mat<3, 4, T, Q> const& m, T scalar) + { + return mat<3, 4, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator/(T scalar, mat<3, 4, T, Q> const& m) + { + return mat<3, 4, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); + } +} //namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x2.hpp new file mode 100644 index 000000000000..5eb31184c8fd --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x2.hpp @@ -0,0 +1,171 @@ +/// @ref core +/// @file glm/detail/type_mat4x2.hpp + +#pragma once + +#include "type_vec2.hpp" +#include "type_vec4.hpp" +#include +#include + +namespace glm +{ + template + struct mat<4, 2, T, Q> + { + typedef vec<2, T, Q> col_type; + typedef vec<4, T, Q> row_type; + typedef mat<4, 2, T, Q> type; + typedef mat<2, 4, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[4]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 4; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 2, T, P> const& m); + + GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + T x0, T y0, + T x1, T y1, + T x2, T y2, + T x3, T y3); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + col_type const& v0, + col_type const& v1, + col_type const& v2, + col_type const& v3); + + // -- Conversions -- + + 
template< + typename X0, typename Y0, + typename X1, typename Y1, + typename X2, typename Y2, + typename X3, typename Y3> + GLM_FUNC_DECL GLM_CONSTEXPR mat( + X0 x0, Y0 y0, + X1 x1, Y1 y1, + X2 x2, Y2 y2, + X3 x3, Y3 y3); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat( + vec<2, V1, Q> const& v1, + vec<2, V2, Q> const& v2, + vec<2, V3, Q> const& v3, + vec<2, V4, Q> const& v4); + + // -- Matrix conversions -- + + template + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, U, P> const& m); + + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator=(mat<4, 2, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator+=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator+=(mat<4, 2, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator-=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator-=(mat<4, 2, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator*=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator++ (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator-- (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator*(T scalar, mat<4, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type operator*(mat<4, 2, T, Q> const& m, typename mat<4, 2, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 2, T, Q>::row_type operator*(typename mat<4, 2, T, Q>::col_type const& v, mat<4, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL 
GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator/(mat<4, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator/(T scalar, mat<4, 2, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat4x2.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x2.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x2.inl new file mode 100644 index 000000000000..1a4f3f168d01 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x2.inl @@ -0,0 +1,574 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0), col_type(0, 1), col_type(0, 0), col_type(0, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0); + this->value[1] = col_type(0, 1); + this->value[2] = col_type(0, 0); + this->value[3] = col_type(0, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 2, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0), col_type(0, s), col_type(0, 0), col_type(0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0); + this->value[1] = col_type(0, s); + this->value[2] = col_type(0, 0); + this->value[3] = col_type(0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat + ( + T x0, T y0, + T x1, T y1, + T x2, T y2, + T x3, T y3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2), col_type(x3, y3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0); + this->value[1] = col_type(x1, y1); + this->value[2] = col_type(x2, y2); + this->value[3] = col_type(x3, y3); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; + this->value[2] = v2; + this->value[3] = v3; +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X0, typename Y0, + typename X1, typename Y1, + typename X2, typename Y2, + typename X3, typename Y3> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat + ( + X0 x0, Y0 y0, + X1 x1, Y1 y1, + X2 x2, Y2 y2, + X3 x3, Y3 y3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2), col_type(x3, y3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + 
this->value[0] = col_type(x0, y0); + this->value[1] = col_type(x1, y1); + this->value[2] = col_type(x2, y2); + this->value[3] = col_type(x3, y3); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(vec<2, V0, Q> const& v0, vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2, vec<2, V3, Q> const& v3) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v0); + this->value[1] = col_type(v1); + this->value[2] = col_type(v2); + this->value[3] = col_type(v3); +# endif + } + + // -- Conversion -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 2, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : 
value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(0); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type & mat<4, 2, T, Q>::operator[](typename mat<4, 2, T, Q>::length_type i) GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type const& mat<4, 2, T, Q>::operator[](typename mat<4, 2, T, Q>::length_type i) const GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>& mat<4, 2, T, Q>::operator=(mat<4, 2, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + this->value[3] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator+=(mat<4, 2, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + this->value[3] += m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + this->value[3] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator-=(mat<4, 2, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + this->value[3] -= m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + this->value[3] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + this->value[3] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + ++this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + --this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> mat<4, 2, T, Q>::operator++(int) + { + mat<4, 2, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> mat<4, 
2, T, Q>::operator--(int) + { + mat<4, 2, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m) + { + return mat<4, 2, T, Q>( + -m[0], + -m[1], + -m[2], + -m[3]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m, T scalar) + { + return mat<4, 2, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar, + m[3] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return mat<4, 2, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2], + m1[3] + m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m, T scalar) + { + return mat<4, 2, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar, + m[3] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return mat<4, 2, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2], + m1[3] - m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m, T scalar) + { + return mat<4, 2, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator*(T scalar, mat<4, 2, T, Q> const& m) + { + return mat<4, 2, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type operator*(mat<4, 2, T, Q> const& m, typename mat<4, 2, T, Q>::row_type const& v) + { + return typename mat<4, 2, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * v.w, + m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * v.w); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::row_type operator*(typename mat<4, 2, T, Q>::col_type const& v, mat<4, 2, T, Q> const& m) + { + return typename mat<4, 2, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1], + v.x * m[1][0] + v.y * m[1][1], + v.x * m[2][0] + v.y * m[2][1], + v.x * m[3][0] + v.y * m[3][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + T const SrcA00 = m1[0][0]; + T const SrcA01 = m1[0][1]; + T const SrcA10 = m1[1][0]; + T const SrcA11 = m1[1][1]; + T const SrcA20 = m1[2][0]; + T const SrcA21 = m1[2][1]; + T const SrcA30 = m1[3][0]; + T const SrcA31 = m1[3][1]; + + T const SrcB00 = m2[0][0]; + T const SrcB01 = m2[0][1]; + T const SrcB02 = m2[0][2]; + T const SrcB03 = m2[0][3]; + T const SrcB10 = m2[1][0]; + T const SrcB11 = m2[1][1]; + T const SrcB12 = m2[1][2]; + T const SrcB13 = m2[1][3]; + + mat<2, 2, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02 + SrcA30 * SrcB03; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02 + SrcA31 * SrcB03; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12 + SrcA30 * SrcB13; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12 + SrcA31 * SrcB13; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> 
operator*(mat<4, 2, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return mat<3, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return mat<4, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2] + m1[3][0] * m2[3][3], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2] + m1[3][1] * m2[3][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator/(mat<4, 2, T, Q> const& m, T scalar) + { + return mat<4, 2, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar, + m[3] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator/(T scalar, mat<4, 2, T, Q> const& m) + { + return mat<4, 2, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2], + scalar / m[3]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); + } +} //namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x3.hpp new file mode 100644 index 000000000000..9650546a6305 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x3.hpp @@ -0,0 +1,171 @@ +/// @ref core +/// @file glm/detail/type_mat4x3.hpp + +#pragma once + +#include "type_vec3.hpp" +#include "type_vec4.hpp" +#include +#include + +namespace glm +{ + template + struct mat<4, 3, T, Q> + { + typedef vec<3, T, Q> col_type; + typedef vec<4, T, Q> row_type; + typedef mat<4, 3, T, Q> type; + typedef mat<3, 4, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[4]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 4; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_FUNC_DECL 
GLM_CONSTEXPR mat(mat<4, 3, T, P> const& m); + + GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T s); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + T const& x0, T const& y0, T const& z0, + T const& x1, T const& y1, T const& z1, + T const& x2, T const& y2, T const& z2, + T const& x3, T const& y3, T const& z3); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + col_type const& v0, + col_type const& v1, + col_type const& v2, + col_type const& v3); + + // -- Conversions -- + + template< + typename X1, typename Y1, typename Z1, + typename X2, typename Y2, typename Z2, + typename X3, typename Y3, typename Z3, + typename X4, typename Y4, typename Z4> + GLM_FUNC_DECL GLM_CONSTEXPR mat( + X1 const& x1, Y1 const& y1, Z1 const& z1, + X2 const& x2, Y2 const& y2, Z2 const& z2, + X3 const& x3, Y3 const& y3, Z3 const& z3, + X4 const& x4, Y4 const& y4, Z4 const& z4); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat( + vec<3, V1, Q> const& v1, + vec<3, V2, Q> const& v2, + vec<3, V3, Q> const& v3, + vec<3, V4, Q> const& v4); + + // -- Matrix conversions -- + + template + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, U, P> const& m); + + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator=(mat<4, 3, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator+=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator+=(mat<4, 3, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator-=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator-=(mat<4, 3, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator*=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q>& operator++(); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q>& operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator*(T scalar, mat<4, 3, T, Q> 
const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type operator*(mat<4, 3, T, Q> const& m, typename mat<4, 3, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 3, T, Q>::row_type operator*(typename mat<4, 3, T, Q>::col_type const& v, mat<4, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator/(mat<4, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator/(T scalar, mat<4, 3, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat4x3.inl" +#endif //GLM_EXTERNAL_TEMPLATE diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x3.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x3.inl new file mode 100644 index 000000000000..c2fe3a44532d --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x3.inl @@ -0,0 +1,598 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0), col_type(0, 1, 0), col_type(0, 0, 1), col_type(0, 0, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0); + this->value[1] = col_type(0, 1, 0); + this->value[2] = col_type(0, 0, 1); + this->value[3] = col_type(0, 0, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 3, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0, 0), col_type(0, s, 0), col_type(0, 0, s), col_type(0, 0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0, 0); + this->value[1] = col_type(0, s, 0); + this->value[2] = col_type(0, 0, s); + this->value[3] = col_type(0, 0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat + ( + T const& x0, T const& y0, T const& z0, + T const& x1, T const& y1, T const& z1, + T const& x2, T const& y2, T const& z2, + T const& x3, T const& y3, T const& z3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0); + this->value[1] = col_type(x1, y1, z1); + this->value[2] = col_type(x2, y2, z2); + this->value[3] = col_type(x3, y3, z3); +# endif + } + + template + GLM_FUNC_QUALIFIER 
GLM_CONSTEXPR mat<4, 3, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; + this->value[2] = v2; + this->value[3] = v3; +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X0, typename Y0, typename Z0, + typename X1, typename Y1, typename Z1, + typename X2, typename Y2, typename Z2, + typename X3, typename Y3, typename Z3> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat + ( + X0 const& x0, Y0 const& y0, Z0 const& z0, + X1 const& x1, Y1 const& y1, Z1 const& z1, + X2 const& x2, Y2 const& y2, Z2 const& z2, + X3 const& x3, Y3 const& y3, Z3 const& z3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0); + this->value[1] = col_type(x1, y1, z1); + this->value[2] = col_type(x2, y2, z2); + this->value[3] = col_type(x3, y3, z3); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2, vec<3, V3, Q> const& v3, vec<3, V4, Q> const& v4) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2), col_type(v3), col_type(v4)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); + this->value[2] = col_type(v3); + this->value[3] = col_type(v4); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 3, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(0, 0, 1); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + 
this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0, 0, 1); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 1); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0, 0, 1); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1), col_type(m[3], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 1); + this->value[3] = col_type(m[3], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(0); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type & mat<4, 3, T, Q>::operator[](typename mat<4, 3, T, Q>::length_type i) GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type const& mat<4, 3, T, Q>::operator[](typename mat<4, 3, T, Q>::length_type i) const GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>& mat<4, 3, T, Q>::operator=(mat<4, 3, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + this->value[3] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator+=(mat<4, 3, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + this->value[3] += m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + this->value[3] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator-=(mat<4, 3, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + this->value[3] -= m[3]; + return *this; + } + + template + 
template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + this->value[3] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + this->value[3] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + ++this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + --this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> mat<4, 3, T, Q>::operator++(int) + { + mat<4, 3, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> mat<4, 3, T, Q>::operator--(int) + { + mat<4, 3, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m) + { + return mat<4, 3, T, Q>( + -m[0], + -m[1], + -m[2], + -m[3]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m, T scalar) + { + return mat<4, 3, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar, + m[3] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return mat<4, 3, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2], + m1[3] + m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m, T scalar) + { + return mat<4, 3, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar, + m[3] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return mat<4, 3, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2], + m1[3] - m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m, T scalar) + { + return mat<4, 3, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator*(T scalar, mat<4, 3, T, Q> const& m) + { + return mat<4, 3, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type operator* + ( + mat<4, 3, T, Q> const& m, + typename mat<4, 3, T, Q>::row_type const& v) + { + return typename mat<4, 3, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * v.w, + m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * v.w, + m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z + m[3][2] * v.w); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::row_type operator* + ( + typename mat<4, 3, T, Q>::col_type const& v, + mat<4, 3, T, Q> const& m) + { + return typename mat<4, 3, T, Q>::row_type( + v.x * 
m[0][0] + v.y * m[0][1] + v.z * m[0][2], + v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2], + v.x * m[2][0] + v.y * m[2][1] + v.z * m[2][2], + v.x * m[3][0] + v.y * m[3][1] + v.z * m[3][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return mat<2, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + T const SrcA00 = m1[0][0]; + T const SrcA01 = m1[0][1]; + T const SrcA02 = m1[0][2]; + T const SrcA10 = m1[1][0]; + T const SrcA11 = m1[1][1]; + T const SrcA12 = m1[1][2]; + T const SrcA20 = m1[2][0]; + T const SrcA21 = m1[2][1]; + T const SrcA22 = m1[2][2]; + T const SrcA30 = m1[3][0]; + T const SrcA31 = m1[3][1]; + T const SrcA32 = m1[3][2]; + + T const SrcB00 = m2[0][0]; + T const SrcB01 = m2[0][1]; + T const SrcB02 = m2[0][2]; + T const SrcB03 = m2[0][3]; + T const SrcB10 = m2[1][0]; + T const SrcB11 = m2[1][1]; + T const SrcB12 = m2[1][2]; + T const SrcB13 = m2[1][3]; + T const SrcB20 = m2[2][0]; + T const SrcB21 = m2[2][1]; + T const SrcB22 = m2[2][2]; + T const SrcB23 = m2[2][3]; + + mat<3, 3, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02 + SrcA30 * SrcB03; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02 + SrcA31 * SrcB03; + Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02 + SrcA32 * SrcB03; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12 + SrcA30 * SrcB13; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12 + SrcA31 * SrcB13; + Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12 + SrcA32 * SrcB13; + Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22 + SrcA30 * SrcB23; + Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22 + SrcA31 * SrcB23; + Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22 + SrcA32 * SrcB23; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return mat<4, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2] + m1[3][2] * m2[2][3], + m1[0][0] * m2[3][0] + m1[1][0] * 
m2[3][1] + m1[2][0] * m2[3][2] + m1[3][0] * m2[3][3], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2] + m1[3][1] * m2[3][3], + m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1] + m1[2][2] * m2[3][2] + m1[3][2] * m2[3][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator/(mat<4, 3, T, Q> const& m, T scalar) + { + return mat<4, 3, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar, + m[3] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator/(T scalar, mat<4, 3, T, Q> const& m) + { + return mat<4, 3, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2], + scalar / m[3]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); + } +} //namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4.hpp new file mode 100644 index 000000000000..14387e2f9b33 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4.hpp @@ -0,0 +1,189 @@ +/// @ref core +/// @file glm/detail/type_mat4x4.hpp + +#pragma once + +#include "type_vec4.hpp" +#include +#include + +namespace glm +{ + template + struct mat<4, 4, T, Q> + { + typedef vec<4, T, Q> col_type; + typedef vec<4, T, Q> row_type; + typedef mat<4, 4, T, Q> type; + typedef mat<4, 4, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[4]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;} + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 4, T, P> const& m); + + GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T s); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + T const& x0, T const& y0, T const& z0, T const& w0, + T const& x1, T const& y1, T const& z1, T const& w1, + T const& x2, T const& y2, T const& z2, T const& w2, + T const& x3, T const& y3, T const& z3, T const& w3); + GLM_FUNC_DECL GLM_CONSTEXPR mat( + col_type const& v0, + col_type const& v1, + col_type const& v2, + col_type const& v3); + + // -- Conversions -- + + template< + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2, + typename X3, typename Y3, typename Z3, typename W3, + typename X4, typename Y4, typename Z4, typename W4> + GLM_FUNC_DECL GLM_CONSTEXPR mat( + X1 const& x1, Y1 const& y1, Z1 const& z1, W1 const& w1, + X2 const& x2, Y2 const& y2, Z2 const& z2, W2 const& w2, + X3 const& x3, Y3 const& y3, Z3 const& z3, W3 const& w3, + X4 const& x4, Y4 const& y4, Z4 const& z4, W4 const& w4); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat( + vec<4, V1, Q> const& v1, + vec<4, V2, Q> const& v2, + vec<4, V3, Q> const& v3, + vec<4, V4, Q> const& v4); + + // -- Matrix conversions -- + + template + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, U, P> const& m); + + GLM_FUNC_DECL GLM_EXPLICIT 
GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); + GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator=(mat<4, 4, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator+=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator+=(mat<4, 4, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator-=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator-=(mat<4, 4, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator*=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator*=(mat<4, 4, U, Q> const& m); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator/=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator/=(mat<4, 4, U, Q> const& m); + + // -- Increment and decrement operators -- + + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator++(); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator+(T scalar, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator-(T scalar, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator*(T scalar, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type operator*(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 4, T, Q>::row_type operator*(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL 
GLM_CONSTEXPR mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator/(T scalar, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type operator/(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 4, T, Q>::row_type operator/(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat4x4.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4.inl new file mode 100644 index 000000000000..db77673163a7 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4.inl @@ -0,0 +1,706 @@ +#include "../matrix.hpp" + +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0, 0); + this->value[1] = col_type(0, 1, 0, 0); + this->value[2] = col_type(0, 0, 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 4, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0), col_type(0, 0, s, 0), col_type(0, 0, 0, s)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0, 0, 0); + this->value[1] = col_type(0, s, 0, 0); + this->value[2] = col_type(0, 0, s, 0); + this->value[3] = col_type(0, 0, 0, s); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat + ( + T const& x0, T const& y0, T const& z0, T const& w0, + T const& x1, T const& y1, T const& z1, T const& w1, + T const& x2, T const& y2, T const& z2, T const& w2, + T const& x3, T const& y3, T const& z3, T const& w3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{ + col_type(x0, y0, z0, w0), + col_type(x1, y1, z1, w1), + col_type(x2, y2, z2, w2), + col_type(x3, y3, z3, w3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0, w0); + this->value[1] = col_type(x1, y1, z1, w1); + this->value[2] = col_type(x2, y2, z2, w2); + this->value[3] = col_type(x3, y3, z3, w3); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) +# if 
GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; + this->value[2] = v2; + this->value[3] = v3; +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 4, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + // -- Conversions -- + + template + template< + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2, + typename X3, typename Y3, typename Z3, typename W3, + typename X4, typename Y4, typename Z4, typename W4> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat + ( + X1 const& x1, Y1 const& y1, Z1 const& z1, W1 const& w1, + X2 const& x2, Y2 const& y2, Z2 const& z2, W2 const& w2, + X3 const& x3, Y3 const& y3, Z3 const& z3, W3 const& w3, + X4 const& x4, Y4 const& y4, Z4 const& z4, W4 const& w4 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x1, y1, z1, w1), col_type(x2, y2, z2, w2), col_type(x3, y3, z3, w3), col_type(x4, y4, z4, w4)} +# endif + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 1st parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 2nd parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 3rd parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 4th parameter type invalid."); + + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 5th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 6th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 7th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 8th parameter type invalid."); + + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 9th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 10th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || 
std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 11th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 12th parameter type invalid."); + + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 13th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 14th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 15th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 16th parameter type invalid."); + +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x1, y1, z1, w1); + this->value[1] = col_type(x2, y2, z2, w2); + this->value[2] = col_type(x3, y3, z3, w3); + this->value[3] = col_type(x4, y4, z4, w4); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(vec<4, V1, Q> const& v1, vec<4, V2, Q> const& v2, vec<4, V3, Q> const& v3, vec<4, V4, Q> const& v4) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2), col_type(v3), col_type(v4)} +# endif + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 1st parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 2nd parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 3rd parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 4th parameter type invalid."); + +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); + this->value[2] = col_type(v3); + this->value[3] = col_type(v4); +# endif + } + + // -- Matrix conversions -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(0, 0, 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = 
col_type(m[1], 0); + this->value[2] = col_type(m[2], 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(0, 0, 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(m[2], 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = col_type(0, 0, 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(0, 0, 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0), col_type(m[3], 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 0); + this->value[3] = col_type(m[3], 1); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type & mat<4, 4, T, Q>::operator[](typename mat<4, 4, T, Q>::length_type i) GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type const& mat<4, 4, T, Q>::operator[](typename mat<4, 4, T, Q>::length_type i) const GLM_NOEXCEPT + { + assert(i < this->length()); + return this->value[i]; + } + + // -- Unary arithmetic operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator=(mat<4, 4, U, Q> const& m) + { + //memcpy could be faster + //memcpy(&this->value, &m.value, 16 * sizeof(valType)); + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; + return *this; + } + + template + template + 
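The matrix conversion constructors above all follow the same GLSL rule: copy the overlapping upper-left block from the source matrix and take every remaining element from the identity. A standalone sketch of that rule for the mat3 -> mat4 case (plain C++, no GLM; the helper name is made up):

    #include <array>
    #include <cstdio>

    // Column-major storage, m[col][row], matching the value[] layout above.
    using Mat3 = std::array<std::array<float, 3>, 3>;
    using Mat4 = std::array<std::array<float, 4>, 4>;

    static Mat4 promote(const Mat3& m)
    {
        Mat4 r{};                          // start from all zeros
        for (int col = 0; col < 3; ++col)
            for (int row = 0; row < 3; ++row)
                r[col][row] = m[col][row]; // copy the overlapping 3x3 block
        r[3][3] = 1.0f;                    // every other element comes from the identity
        return r;
    }

    int main()
    {
        Mat3 rotZ{{{0, 1, 0}, {-1, 0, 0}, {0, 0, 1}}}; // 90-degree rotation about Z
        Mat4 m = promote(rotZ);
        std::printf("%g %g\n", m[2][2], m[3][3]);      // 1 1
    }

This is why promoting a pure rotation to mat4 yields a valid homogeneous transform with no translation.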
GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + this->value[3] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator+=(mat<4, 4, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + this->value[3] += m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + this->value[3] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator-=(mat<4, 4, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + this->value[3] -= m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + this->value[3] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator*=(mat<4, 4, U, Q> const& m) + { + return (*this = *this * m); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + this->value[3] /= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator/=(mat<4, 4, U, Q> const& m) + { + return *this *= inverse(m); + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + ++this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + --this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> mat<4, 4, T, Q>::operator++(int) + { + mat<4, 4, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> mat<4, 4, T, Q>::operator--(int) + { + mat<4, 4, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary constant operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + -m[0], + -m[1], + -m[2], + -m[3]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m, T scalar) + { + return mat<4, 4, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar, + m[3] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator+(T scalar, mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar, + m[3] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return mat<4, 4, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2], + m1[3] + m2[3]); + } + + template + 
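Note the asymmetry in the compound operators above: operator*= with a matrix operand defers to the full product, while operator/= is defined as right-multiplication by inverse(m), so A /= B computes A * B^-1. A 2x2 sanity check of that definition outside GLM (plain C++; helper names invented for the sketch):

    #include <cstdio>

    struct M2 { double a, b, c, d; }; // row-major [[a b], [c d]]

    static M2 mul(M2 x, M2 y)
    {
        return { x.a * y.a + x.b * y.c, x.a * y.b + x.b * y.d,
                 x.c * y.a + x.d * y.c, x.c * y.b + x.d * y.d };
    }

    static M2 inverse(M2 m)
    {
        double det = m.a * m.d - m.b * m.c; // assumed non-zero
        return { m.d / det, -m.b / det, -m.c / det, m.a / det };
    }

    int main()
    {
        M2 a{1, 2, 3, 4}, b{2, 0, 0, 2};
        M2 q = mul(a, inverse(b));     // what a "/=" b produces
        M2 back = mul(q, b);           // multiplying back by b undoes the division
        std::printf("%g %g %g %g\n", back.a, back.b, back.c, back.d); // 1 2 3 4
    }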
GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m, T scalar) + { + return mat<4, 4, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar, + m[3] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator-(T scalar, mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + scalar - m[0], + scalar - m[1], + scalar - m[2], + scalar - m[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return mat<4, 4, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2], + m1[3] - m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m, T scalar) + { + return mat<4, 4, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator*(T scalar, mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type operator* + ( + mat<4, 4, T, Q> const& m, + typename mat<4, 4, T, Q>::row_type const& v + ) + { +/* + __m128 v0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 v1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(1, 1, 1, 1)); + __m128 v2 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 v3 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(m[0].data, v0); + __m128 m1 = _mm_mul_ps(m[1].data, v1); + __m128 a0 = _mm_add_ps(m0, m1); + + __m128 m2 = _mm_mul_ps(m[2].data, v2); + __m128 m3 = _mm_mul_ps(m[3].data, v3); + __m128 a1 = _mm_add_ps(m2, m3); + + __m128 a2 = _mm_add_ps(a0, a1); + + return typename mat<4, 4, T, Q>::col_type(a2); +*/ + + typename mat<4, 4, T, Q>::col_type const Mov0(v[0]); + typename mat<4, 4, T, Q>::col_type const Mov1(v[1]); + typename mat<4, 4, T, Q>::col_type const Mul0 = m[0] * Mov0; + typename mat<4, 4, T, Q>::col_type const Mul1 = m[1] * Mov1; + typename mat<4, 4, T, Q>::col_type const Add0 = Mul0 + Mul1; + typename mat<4, 4, T, Q>::col_type const Mov2(v[2]); + typename mat<4, 4, T, Q>::col_type const Mov3(v[3]); + typename mat<4, 4, T, Q>::col_type const Mul2 = m[2] * Mov2; + typename mat<4, 4, T, Q>::col_type const Mul3 = m[3] * Mov3; + typename mat<4, 4, T, Q>::col_type const Add1 = Mul2 + Mul3; + typename mat<4, 4, T, Q>::col_type const Add2 = Add0 + Add1; + return Add2; + +/* + return typename mat<4, 4, T, Q>::col_type( + m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0] * v[3], + m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1] * v[3], + m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2] * v[3], + m[0][3] * v[0] + m[1][3] * v[1] + m[2][3] * v[2] + m[3][3] * v[3]); +*/ + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::row_type operator* + ( + typename mat<4, 4, T, Q>::col_type const& v, + mat<4, 4, T, Q> const& m + ) + { + return typename mat<4, 4, T, Q>::row_type( + m[0][0] * v[0] + m[0][1] * v[1] + m[0][2] * v[2] + m[0][3] * v[3], + m[1][0] * v[0] + m[1][1] * v[1] + m[1][2] * v[2] + m[1][3] * v[3], + m[2][0] * v[0] + m[2][1] * v[1] + m[2][2] * v[2] + m[2][3] * v[3], + m[3][0] * v[0] + m[3][1] * v[1] + m[3][2] * v[2] + m[3][3] * v[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return mat<2, 4, T, Q>( + m1[0][0] * 
m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2] + m1[3][3] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2] + m1[3][3] * m2[1][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return mat<3, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2] + m1[3][3] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2] + m1[3][3] * m2[1][3], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2] + m1[3][2] * m2[2][3], + m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1] + m1[2][3] * m2[2][2] + m1[3][3] * m2[2][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + typename mat<4, 4, T, Q>::col_type const SrcA0 = m1[0]; + typename mat<4, 4, T, Q>::col_type const SrcA1 = m1[1]; + typename mat<4, 4, T, Q>::col_type const SrcA2 = m1[2]; + typename mat<4, 4, T, Q>::col_type const SrcA3 = m1[3]; + + typename mat<4, 4, T, Q>::col_type const SrcB0 = m2[0]; + typename mat<4, 4, T, Q>::col_type const SrcB1 = m2[1]; + typename mat<4, 4, T, Q>::col_type const SrcB2 = m2[2]; + typename mat<4, 4, T, Q>::col_type const SrcB3 = m2[3]; + + mat<4, 4, T, Q> Result; + Result[0] = SrcA0 * SrcB0[0] + SrcA1 * SrcB0[1] + SrcA2 * SrcB0[2] + SrcA3 * SrcB0[3]; + Result[1] = SrcA0 * SrcB1[0] + SrcA1 * SrcB1[1] + SrcA2 * SrcB1[2] + SrcA3 * SrcB1[3]; + Result[2] = SrcA0 * SrcB2[0] + SrcA1 * SrcB2[1] + SrcA2 * SrcB2[2] + SrcA3 * SrcB2[3]; + Result[3] = SrcA0 * SrcB3[0] + SrcA1 * SrcB3[1] + SrcA2 * SrcB3[2] + SrcA3 * SrcB3[3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m, T scalar) + { + return mat<4, 4, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar, + m[3] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator/(T scalar, mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2], + scalar / m[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type operator/(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v) + { + return inverse(m) * 
v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::row_type operator/(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m) + { + return v * inverse(m); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + mat<4, 4, T, Q> m1_copy(m1); + return m1_copy /= m2; + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "type_mat4x4_simd.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4_simd.inl new file mode 100644 index 000000000000..fb3a16f06290 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_mat4x4_simd.inl @@ -0,0 +1,6 @@ +/// @ref core + +namespace glm +{ + +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_quat.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_quat.hpp new file mode 100644 index 000000000000..d489e0a233af --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_quat.hpp @@ -0,0 +1,193 @@ +/// @ref core +/// @file glm/detail/type_quat.hpp + +#pragma once + +// Dependency: +#include "../detail/type_mat3x3.hpp" +#include "../detail/type_mat4x4.hpp" +#include "../detail/type_vec3.hpp" +#include "../detail/type_vec4.hpp" +#include "../ext/vector_relational.hpp" +#include "../ext/quaternion_relational.hpp" +#include "../gtc/constants.hpp" +#include "../gtc/matrix_transform.hpp" + +namespace glm +{ + template + struct qua + { + // -- Implementation detail -- + + typedef qua type; + typedef T value_type; + + // -- Data -- + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wpedantic" +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +# pragma clang diagnostic ignored "-Wnested-anon-types" +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +# endif +# endif + +# if GLM_LANG & GLM_LANG_CXXMS_FLAG + union + { +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + struct { T w, x, y, z; }; +# else + struct { T x, y, z, w; }; +# endif + + typename detail::storage<4, T, detail::is_aligned::value>::type data; + }; +# else +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + T w, x, y, z; +# else + T x, y, z, w; +# endif +# endif + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif +# endif + + // -- Component accesses -- + + typedef length_t length_type; + + /// Return the count of components of a quaternion + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;} + + GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); + GLM_FUNC_DECL GLM_CONSTEXPR T const& 
operator[](length_type i) const; + + // -- Implicit basic constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR qua() GLM_DEFAULT_CTOR; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR qua(qua const& q) GLM_DEFAULT; + template + GLM_FUNC_DECL GLM_CONSTEXPR qua(qua const& q); + + // -- Explicit basic constructors -- + + GLM_FUNC_DECL GLM_CONSTEXPR qua(T s, vec<3, T, Q> const& v); + +# ifdef GLM_FORCE_QUAT_CTOR_XYZW + GLM_FUNC_DECL GLM_CONSTEXPR qua(T x, T y, T z, T w); +# else + GLM_FUNC_DECL GLM_CONSTEXPR qua(T w, T x, T y, T z); +# endif + + GLM_FUNC_DECL static GLM_CONSTEXPR qua wxyz(T w, T x, T y, T z); + + // -- Conversion constructors -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT qua(qua const& q); + + /// Explicit conversion operators +# if GLM_HAS_EXPLICIT_CONVERSION_OPERATORS + GLM_FUNC_DECL explicit operator mat<3, 3, T, Q>() const; + GLM_FUNC_DECL explicit operator mat<4, 4, T, Q>() const; +# endif + + /// Create a quaternion from two normalized axis + /// + /// @param u A first normalized axis + /// @param v A second normalized axis + /// @see gtc_quaternion + /// @see http://lolengine.net/blog/2013/09/18/beautiful-maths-quaternion-from-vectors + GLM_FUNC_DECL qua(vec<3, T, Q> const& u, vec<3, T, Q> const& v); + + /// Build a quaternion from euler angles (pitch, yaw, roll), in radians. + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT qua(vec<3, T, Q> const& eulerAngles); + GLM_FUNC_DECL GLM_EXPLICIT qua(mat<3, 3, T, Q> const& q); + GLM_FUNC_DECL GLM_EXPLICIT qua(mat<4, 4, T, Q> const& q); + + // -- Unary arithmetic operators -- + + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR qua& operator=(qua const& q) GLM_DEFAULT; + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua& operator=(qua const& q); + template + GLM_FUNC_DECL GLM_CONSTEXPR qua& operator+=(qua const& q); + template + GLM_FUNC_DECL GLM_CONSTEXPR qua& operator-=(qua const& q); + template + GLM_FUNC_DECL GLM_CONSTEXPR qua& operator*=(qua const& q); + template + GLM_FUNC_DECL GLM_CONSTEXPR qua& operator*=(U s); + template + GLM_FUNC_DECL GLM_CONSTEXPR qua& operator/=(U s); + }; + + // -- Unary bit operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator+(qua const& q); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator-(qua const& q); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator+(qua const& q, qua const& p); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator-(qua const& q, qua const& p); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(qua const& q, qua const& p); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(qua const& q, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, qua const& q); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(qua const& q, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, qua const& q); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(qua const& q, T const& s); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(T const& s, qua const& q); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator/(qua const& q, T const& s); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(qua const& q1, qua const& q2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(qua const& q1, qua const& q2); +} //namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_quat.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git 
a/thirdparty/manifold/thirdparty/glm/glm/detail/type_quat.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_quat.inl new file mode 100644 index 000000000000..3213ea6aacc9 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_quat.inl @@ -0,0 +1,424 @@ +#include "../trigonometric.hpp" +#include "../exponential.hpp" +#include "../ext/quaternion_common.hpp" +#include "../ext/quaternion_geometric.hpp" +#include + +namespace glm{ +namespace detail +{ + template + struct genTypeTrait > + { + static const genTypeEnum GENTYPE = GENTYPE_QUAT; + }; + + template + struct compute_dot, T, Aligned> + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(qua const& a, qua const& b) + { + vec<4, T, Q> tmp(a.w * b.w, a.x * b.x, a.y * b.y, a.z * b.z); + return (tmp.x + tmp.y) + (tmp.z + tmp.w); + } + }; + + template + struct compute_quat_add + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, qua const& p) + { + return qua::wxyz(q.w + p.w, q.x + p.x, q.y + p.y, q.z + p.z); + } + }; + + template + struct compute_quat_sub + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, qua const& p) + { + return qua::wxyz(q.w - p.w, q.x - p.x, q.y - p.y, q.z - p.z); + } + }; + + template + struct compute_quat_mul_scalar + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, T s) + { + return qua::wxyz(q.w * s, q.x * s, q.y * s, q.z * s); + } + }; + + template + struct compute_quat_div_scalar + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, T s) + { + return qua::wxyz(q.w / s, q.x / s, q.y / s, q.z / s); + } + }; + + template + struct compute_quat_mul_vec4 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(qua const& q, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(q * vec<3, T, Q>(v), v.w); + } + }; +}//namespace detail + + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & qua::operator[](typename qua::length_type i) + { + assert(i >= 0 && i < this->length()); +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + return (&w)[i]; +# else + return (&x)[i]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& qua::operator[](typename qua::length_type i) const + { + assert(i >= 0 && i < this->length()); +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + return (&w)[i]; +# else + return (&x)[i]; +# endif + } + + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR qua::qua() +# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(1), x(0), y(0), z(0) +# else + : x(0), y(0), z(0), w(1) +# endif +# endif + {} +# endif + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(q.w), x(q.x), y(q.y), z(q.z) +# else + : x(q.x), y(q.y), z(q.z), w(q.w) +# endif + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(q.w), x(q.x), y(q.y), z(q.z) +# else + : x(q.x), y(q.y), z(q.z), w(q.w) +# endif + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(T s, vec<3, T, Q> const& v) +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(s), x(v.x), y(v.y), z(v.z) +# else + : x(v.x), y(v.y), z(v.z), w(s) +# endif + {} + + template +# ifdef GLM_FORCE_QUAT_CTOR_XYZW + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(T _x, T _y, T _z, T _w) +# else + GLM_FUNC_QUALIFIER 
GLM_CONSTEXPR qua::qua(T _w, T _x, T _y, T _z) +# endif +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(_w), x(_x), y(_y), z(_z) +# else + : x(_x), y(_y), z(_z), w(_w) +# endif + {} + + template + GLM_CONSTEXPR qua qua::wxyz(T w, T x, T y, T z) { +# ifdef GLM_FORCE_QUAT_DATA_XYZW + return qua(x, y, z, w); +# else + return qua(w, x, y, z); +# endif + } + + // -- Conversion constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(static_cast(q.w)), x(static_cast(q.x)), y(static_cast(q.y)), z(static_cast(q.z)) +# else + : x(static_cast(q.x)), y(static_cast(q.y)), z(static_cast(q.z)), w(static_cast(q.w)) +# endif + {} + + //template + //GLM_FUNC_QUALIFIER qua::qua + //( + // valType const& pitch, + // valType const& yaw, + // valType const& roll + //) + //{ + // vec<3, valType> eulerAngle(pitch * valType(0.5), yaw * valType(0.5), roll * valType(0.5)); + // vec<3, valType> c = glm::cos(eulerAngle * valType(0.5)); + // vec<3, valType> s = glm::sin(eulerAngle * valType(0.5)); + // + // this->w = c.x * c.y * c.z + s.x * s.y * s.z; + // this->x = s.x * c.y * c.z - c.x * s.y * s.z; + // this->y = c.x * s.y * c.z + s.x * c.y * s.z; + // this->z = c.x * c.y * s.z - s.x * s.y * c.z; + //} + + template + GLM_FUNC_QUALIFIER qua::qua(vec<3, T, Q> const& u, vec<3, T, Q> const& v) + { + T norm_u_norm_v = sqrt(dot(u, u) * dot(v, v)); + T real_part = norm_u_norm_v + dot(u, v); + vec<3, T, Q> t; + + if(real_part < static_cast(1.e-6f) * norm_u_norm_v) + { + // If u and v are exactly opposite, rotate 180 degrees + // around an arbitrary orthogonal axis. Axis normalisation + // can happen later, when we normalise the quaternion. + real_part = static_cast(0); + t = abs(u.x) > abs(u.z) ? vec<3, T, Q>(-u.y, u.x, static_cast(0)) : vec<3, T, Q>(static_cast(0), -u.z, u.y); + } + else + { + // Otherwise, build quaternion the standard way. 
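// Sketch of why the standard branch below works (annotation, not part of the
// original source; assumes u and v are the normalized axes this constructor
// documents): with theta the angle between u and v, dot(u, v) = cos(theta)
// and |cross(u, v)| = sin(theta), so before normalization
//   w   = 1 + cos(theta)      = 2 * cos^2(theta / 2)
//   xyz = cross(u, v), |xyz|  = 2 * sin(theta / 2) * cos(theta / 2)
// and normalizing divides both by 2 * cos(theta / 2), leaving
// w = cos(theta / 2) and xyz = sin(theta / 2) * axis: exactly the half-angle
// quaternion that rotates u onto v.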
+ t = cross(u, v); + } + + *this = normalize(qua::wxyz(real_part, t.x, t.y, t.z)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(vec<3, T, Q> const& eulerAngle) + { + vec<3, T, Q> c = glm::cos(eulerAngle * T(0.5)); + vec<3, T, Q> s = glm::sin(eulerAngle * T(0.5)); + + this->w = c.x * c.y * c.z + s.x * s.y * s.z; + this->x = s.x * c.y * c.z - c.x * s.y * s.z; + this->y = c.x * s.y * c.z + s.x * c.y * s.z; + this->z = c.x * c.y * s.z - s.x * s.y * c.z; + } + + template + GLM_FUNC_QUALIFIER qua::qua(mat<3, 3, T, Q> const& m) + { + *this = quat_cast(m); + } + + template + GLM_FUNC_QUALIFIER qua::qua(mat<4, 4, T, Q> const& m) + { + *this = quat_cast(m); + } + +# if GLM_HAS_EXPLICIT_CONVERSION_OPERATORS + template + GLM_FUNC_QUALIFIER qua::operator mat<3, 3, T, Q>() const + { + return mat3_cast(*this); + } + + template + GLM_FUNC_QUALIFIER qua::operator mat<4, 4, T, Q>() const + { + return mat4_cast(*this); + } +# endif//GLM_HAS_EXPLICIT_CONVERSION_OPERATORS + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator=(qua const& q) + { + this->w = q.w; + this->x = q.x; + this->y = q.y; + this->z = q.z; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator=(qua const& q) + { + this->w = static_cast(q.w); + this->x = static_cast(q.x); + this->y = static_cast(q.y); + this->z = static_cast(q.z); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator+=(qua const& q) + { + return (*this = detail::compute_quat_add::value>::call(*this, qua(q))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator-=(qua const& q) + { + return (*this = detail::compute_quat_sub::value>::call(*this, qua(q))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator*=(qua const& r) + { + qua const p(*this); + qua const q(r); + + this->w = p.w * q.w - p.x * q.x - p.y * q.y - p.z * q.z; + this->x = p.w * q.x + p.x * q.w + p.y * q.z - p.z * q.y; + this->y = p.w * q.y + p.y * q.w + p.z * q.x - p.x * q.z; + this->z = p.w * q.z + p.z * q.w + p.x * q.y - p.y * q.x; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator*=(U s) + { + return (*this = detail::compute_quat_mul_scalar::value>::call(*this, static_cast(s))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator/=(U s) + { + return (*this = detail::compute_quat_div_scalar::value>::call(*this, static_cast(s))); + } + + // -- Unary bit operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator+(qua const& q) + { + return q; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator-(qua const& q) + { + return qua::wxyz(-q.w, -q.x, -q.y, -q.z); + } + + // -- Binary operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator+(qua const& q, qua const& p) + { + return qua(q) += p; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator-(qua const& q, qua const& p) + { + return qua(q) -= p; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator*(qua const& q, qua const& p) + { + return qua(q) *= p; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(qua const& q, vec<3, T, Q> const& v) + { + vec<3, T, Q> const QuatVector(q.x, q.y, q.z); + vec<3, T, Q> const uv(glm::cross(QuatVector, v)); + vec<3, T, Q> const uuv(glm::cross(QuatVector, uv)); + + return v + 
((uv * q.w) + uuv) * static_cast(2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, qua const& q) + { + return glm::inverse(q) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(qua const& q, vec<4, T, Q> const& v) + { + return detail::compute_quat_mul_vec4::value>::call(q, v); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, qua const& q) + { + return glm::inverse(q) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator*(qua const& q, T const& s) + { + return qua::wxyz( + q.w * s, q.x * s, q.y * s, q.z * s); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator*(T const& s, qua const& q) + { + return q * s; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator/(qua const& q, T const& s) + { + return qua::wxyz( + q.w / s, q.x / s, q.y / s, q.z / s); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(qua const& q1, qua const& q2) + { + return q1.x == q2.x && q1.y == q2.y && q1.z == q2.z && q1.w == q2.w; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(qua const& q1, qua const& q2) + { + return q1.x != q2.x || q1.y != q2.y || q1.z != q2.z || q1.w != q2.w; + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "type_quat_simd.inl" +#endif + diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_quat_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_quat_simd.inl new file mode 100644 index 000000000000..fa6da198326c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_quat_simd.inl @@ -0,0 +1,208 @@ +/// @ref core + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm{ +namespace detail +{ +/* + template + struct compute_quat_mul + { + static qua call(qua const& q1, qua const& q2) + { + // SSE2 STATS: 11 shuffle, 8 mul, 8 add + // SSE4 STATS: 3 shuffle, 4 mul, 4 dpps + + __m128 const mul0 = _mm_mul_ps(q1.data, _mm_shuffle_ps(q2.data, q2.data, _MM_SHUFFLE(0, 1, 2, 3))); + __m128 const mul1 = _mm_mul_ps(q1.data, _mm_shuffle_ps(q2.data, q2.data, _MM_SHUFFLE(1, 0, 3, 2))); + __m128 const mul2 = _mm_mul_ps(q1.data, _mm_shuffle_ps(q2.data, q2.data, _MM_SHUFFLE(2, 3, 0, 1))); + __m128 const mul3 = _mm_mul_ps(q1.data, q2.data); + +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + __m128 const add0 = _mm_dp_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), 0xff); + __m128 const add1 = _mm_dp_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f), 0xff); + __m128 const add2 = _mm_dp_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f), 0xff); + __m128 const add3 = _mm_dp_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f), 0xff); +# else + __m128 const mul4 = _mm_mul_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f)); + __m128 const add0 = _mm_add_ps(mul0, _mm_movehl_ps(mul4, mul4)); + __m128 const add4 = _mm_add_ss(add0, _mm_shuffle_ps(add0, add0, 1)); + + __m128 const mul5 = _mm_mul_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f)); + __m128 const add1 = _mm_add_ps(mul1, _mm_movehl_ps(mul5, mul5)); + __m128 const add5 = _mm_add_ss(add1, _mm_shuffle_ps(add1, add1, 1)); + + __m128 const mul6 = _mm_mul_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f)); + __m128 const add2 = _mm_add_ps(mul6, _mm_movehl_ps(mul6, mul6)); + __m128 const add6 = _mm_add_ss(add2, _mm_shuffle_ps(add2, add2, 1)); + + __m128 const mul7 = _mm_mul_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f)); + __m128 const add3 = _mm_add_ps(mul3, _mm_movehl_ps(mul7, mul7)); + __m128 const add7 = _mm_add_ss(add3, 
_mm_shuffle_ps(add3, add3, 1)); + #endif + + // This SIMD code is a politically correct way of doing this, but in every test I've tried it has been slower than + // the final code below. I'll keep this here for reference - maybe somebody else can do something better... + // + //__m128 xxyy = _mm_shuffle_ps(add4, add5, _MM_SHUFFLE(0, 0, 0, 0)); + //__m128 zzww = _mm_shuffle_ps(add6, add7, _MM_SHUFFLE(0, 0, 0, 0)); + // + //return _mm_shuffle_ps(xxyy, zzww, _MM_SHUFFLE(2, 0, 2, 0)); + + qua Result; + _mm_store_ss(&Result.x, add4); + _mm_store_ss(&Result.y, add5); + _mm_store_ss(&Result.z, add6); + _mm_store_ss(&Result.w, add7); + return Result; + } + }; +*/ + + template + struct compute_quat_add + { + static qua call(qua const& q, qua const& p) + { + qua Result; + Result.data = _mm_add_ps(q.data, p.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_quat_add + { + static qua call(qua const& a, qua const& b) + { + qua Result; + Result.data = _mm256_add_pd(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_quat_sub + { + static qua call(qua const& q, qua const& p) + { + qua Result; + Result.data = _mm_sub_ps(q.data, p.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_quat_sub + { + static qua call(qua const& a, qua const& b) + { + qua Result; + Result.data = _mm256_sub_pd(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_quat_mul_scalar + { + static qua call(qua const& q, float s) + { + vec<4, float, Q> Result; + Result.data = _mm_mul_ps(q.data, _mm_set_ps1(s)); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_quat_mul_scalar + { + static qua call(qua const& q, double s) + { + qua Result; + Result.data = _mm256_mul_pd(q.data, _mm256_set1_pd(s)); + return Result; + } + }; +# endif + + template + struct compute_quat_div_scalar + { + static qua call(qua const& q, float s) + { + vec<4, float, Q> Result; + Result.data = _mm_div_ps(q.data, _mm_set_ps1(s)); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_quat_div_scalar + { + static qua call(qua const& q, double s) + { + qua Result; + Result.data = _mm256_div_pd(q.data, _mm256_set1_pd(s)); + return Result; + } + }; +# endif + + template + struct compute_quat_mul_vec4 + { + static vec<4, float, Q> call(qua const& q, vec<4, float, Q> const& v) + { +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + __m128 const q_wwww = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 const q_swp0 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(0, 1, 3, 2)); + __m128 const q_swp1 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(0, 2, 1, 3)); + __m128 const v_swp0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 const v_swp1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 1, 0, 2)); + + __m128 uv = _mm_sub_ps(_mm_mul_ps(q_swp0, v_swp1), _mm_mul_ps(q_swp1, v_swp0)); + __m128 uv_swp0 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 uv_swp1 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 1, 0, 2)); + __m128 uuv = _mm_sub_ps(_mm_mul_ps(q_swp0, uv_swp1), _mm_mul_ps(q_swp1, uv_swp0)); + + __m128 const two = _mm_set1_ps(2.0f); + uv = _mm_mul_ps(uv, _mm_mul_ps(q_wwww, two)); + uuv = _mm_mul_ps(uuv, two); + + vec<4, float, Q> Result; + Result.data = _mm_add_ps(v.data, _mm_add_ps(uv, uuv)); + return Result; +# else + __m128 const q_wwww = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 const q_swp0 =
_mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 const q_swp1 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 1, 0, 2)); + __m128 const v_swp0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 const v_swp1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 1, 0, 2)); + + __m128 uv = _mm_sub_ps(_mm_mul_ps(q_swp0, v_swp1), _mm_mul_ps(q_swp1, v_swp0)); + __m128 uv_swp0 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 uv_swp1 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 1, 0, 2)); + __m128 uuv = _mm_sub_ps(_mm_mul_ps(q_swp0, uv_swp1), _mm_mul_ps(q_swp1, uv_swp0)); + + __m128 const two = _mm_set1_ps(2.0f); + uv = _mm_mul_ps(uv, _mm_mul_ps(q_wwww, two)); + uuv = _mm_mul_ps(uuv, two); + + vec<4, float, Q> Result; + Result.data = _mm_add_ps(v.data, _mm_add_ps(uv, uuv)); + return Result; +# endif + } + }; +}//namespace detail +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec1.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec1.hpp new file mode 100644 index 000000000000..91232f9cbabe --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec1.hpp @@ -0,0 +1,308 @@ +/// @ref core +/// @file glm/detail/type_vec1.hpp + +#pragma once + +#include "qualifier.hpp" +#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +# include "_swizzle.hpp" +#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION +# include "_swizzle_func.hpp" +#endif +#include + +namespace glm +{ + template + struct vec<1, T, Q> + { + // -- Implementation detail -- + + typedef T value_type; + typedef vec<1, T, Q> type; + typedef vec<1, bool, Q> bool_type; + + // -- Data -- + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wpedantic" +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +# pragma clang diagnostic ignored "-Wnested-anon-types" +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +# endif +# endif + +# if GLM_CONFIG_XYZW_ONLY + T x; +# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE + union + { + T x; + T r; + T s; + + typename detail::storage<1, T, detail::is_aligned::value>::type data; +/* +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + _GLM_SWIZZLE1_2_MEMBERS(T, Q, x) + _GLM_SWIZZLE1_2_MEMBERS(T, Q, r) + _GLM_SWIZZLE1_2_MEMBERS(T, Q, s) + _GLM_SWIZZLE1_3_MEMBERS(T, Q, x) + _GLM_SWIZZLE1_3_MEMBERS(T, Q, r) + _GLM_SWIZZLE1_3_MEMBERS(T, Q, s) + _GLM_SWIZZLE1_4_MEMBERS(T, Q, x) + _GLM_SWIZZLE1_4_MEMBERS(T, Q, r) + _GLM_SWIZZLE1_4_MEMBERS(T, Q, s) +# endif +*/ + }; +# else + union {T x, r, s;}; +/* +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC1(T, Q) +# endif +*/ +# endif + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif +# endif + + // -- Component accesses -- + + /// Return the count of components of the vector + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 1;} + + GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); + GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; + + // -- Implicit basic constructors -- + + 
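In the vec<1> definition above, x, r and s all alias the same scalar through the anonymous union, so the position, color and texture-coordinate spellings are interchangeable. A trimmed illustration of that aliasing (plain C++; GLM's storage member and swizzle machinery omitted, relying on the same identical-type union punning GLM itself uses):

    #include <cstdio>

    struct Vec1f
    {
        union { float x; float r; float s; }; // one component, three names
    };

    int main()
    {
        Vec1f v{};
        v.r = 2.5f;               // write through the "color" name
        std::printf("%g\n", v.x); // read through the "position" name: 2.5
    }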
GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, T, P> const& v); + + // -- Explicit basic constructors -- + + GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); + + // -- Conversion vector constructors -- + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<2, U, P> const& v); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<1, U, P> const& v); + + // -- Swizzle constructors -- +/* +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<1, T, Q, E0, -1,-2,-3> const& that) + { + *this = that(); + } +# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +*/ + // -- Unary arithmetic operators -- + + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator=(vec const& v) GLM_DEFAULT; + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator+=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator+=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator-=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator-=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator*=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator*=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator/=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator/=(vec<1, U, Q> const& v); + + // -- Increment and decrement operators -- + + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator++(); + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator--(int); + + // -- Unary bit operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator%=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator%=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator&=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator&=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator|=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator|=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator^=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator^=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator<<=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator<<=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator>>=(U scalar); + 
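Each compound operator declared above is templated on the scalar type U, and the definitions in type_vec1.inl further down convert the operand with static_cast<T> before applying it, so mixed-type arithmetic converts exactly once at the call boundary. A minimal sketch of that pattern and its consequence (plain C++; the type is illustrative, not GLM's):

    #include <cstdio>

    template <typename T>
    struct Vec1
    {
        T x;
        template <typename U>
        Vec1& operator+=(U scalar)
        {
            x += static_cast<T>(scalar); // heterogeneous operand, converted once
            return *this;
        }
    };

    int main()
    {
        Vec1<int> v{1};
        v += 2.9;                 // the double operand truncates to 2 first
        std::printf("%d\n", v.x); // 3
    }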
template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator>>=(vec<1, U, Q> const& v); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> 
operator~(vec<1, T, Q> const& v); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, bool, Q> operator&&(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, bool, Q> operator||(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_vec1.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec1.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec1.inl new file mode 100644 index 000000000000..18411e7f76c5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec1.inl @@ -0,0 +1,553 @@ +/// @ref core + +#include "./compute_vector_relational.hpp" + +namespace glm +{ + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec() +# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE + : x(0) +# endif + {} +# endif + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, T, Q> const& v) + : x(v.x) + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, T, P> const& v) + : x(v.x) + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(T scalar) + : x(scalar) + {} + + // -- Conversion vector constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, U, P> const& v) + : x(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<2, U, P> const& v) + : x(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<3, U, P> const& v) + : x(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<4, U, P> const& v) + : x(static_cast(v.x)) + {} + + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<1, T, Q>::operator[](typename vec<1, T, Q>::length_type) + { + return x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<1, T, Q>::operator[](typename vec<1, T, Q>::length_type) const + { + return x; + } + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator=(vec<1, T, Q> const& v) + { + this->x = v.x; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator=(vec<1, U, Q> const& v) + { + this->x = static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator+=(U scalar) + { + this->x += static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator+=(vec<1, U, Q> const& v) + { + this->x += static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator-=(U scalar) + { + this->x -= static_cast(scalar); + return *this; + } + + template + template + 
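The conversion constructors above read only v.x, so constructing a vec<1> from a vec2, vec3 or vec4 deliberately discards the remaining components; that is why the header declares those overloads explicit. Sketched outside GLM (names invented):

    #include <cstdio>

    struct Vec3f { float x, y, z; };

    struct Vec1f
    {
        float x;
        explicit Vec1f(const Vec3f& v) : x(v.x) {} // drops y and z on purpose
    };

    int main()
    {
        Vec3f v{1.5f, 2.0f, 3.0f};
        Vec1f t(v);               // must be spelled explicitly
        std::printf("%g\n", t.x); // 1.5
    }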
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator-=(vec<1, U, Q> const& v) + { + this->x -= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator*=(U scalar) + { + this->x *= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator*=(vec<1, U, Q> const& v) + { + this->x *= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator/=(U scalar) + { + this->x /= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator/=(vec<1, U, Q> const& v) + { + this->x /= static_cast(v.x); + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator++() + { + ++this->x; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator--() + { + --this->x; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> vec<1, T, Q>::operator++(int) + { + vec<1, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> vec<1, T, Q>::operator--(int) + { + vec<1, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary bit operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator%=(U scalar) + { + this->x %= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator%=(vec<1, U, Q> const& v) + { + this->x %= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator&=(U scalar) + { + this->x &= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator&=(vec<1, U, Q> const& v) + { + this->x &= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator|=(U scalar) + { + this->x |= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator|=(vec<1, U, Q> const& v) + { + this->x |= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator^=(U scalar) + { + this->x ^= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator^=(vec<1, U, Q> const& v) + { + this->x ^= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator<<=(U scalar) + { + this->x <<= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator<<=(vec<1, U, Q> const& v) + { + this->x <<= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator>>=(U scalar) + { + this->x >>= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator>>=(vec<1, U, Q> const& v) + { + this->x >>= static_cast(v.x); + return *this; 
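// Annotation (not part of the original source): like the built-in operators
// they wrap, these shift assignments perform no range checking; a shift count
// greater than or equal to the bit width of T, or a left shift of a negative
// signed value, is undefined behavior here just as it is on a bare integer.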
+ } + + // -- Unary constant operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + -v.x); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar + v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x + v2.x); + } + + //operator- + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar - v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x - v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar * v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x * v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar / v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x / v2.x); + } + + // -- Binary bit operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x % scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar % v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x % v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x & scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar & v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x & v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x | scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar | v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR 
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+	{
+		return vec<1, T, Q>(
+			v1.x | v2.x);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v, T scalar)
+	{
+		return vec<1, T, Q>(
+			v.x ^ scalar);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(T scalar, vec<1, T, Q> const& v)
+	{
+		return vec<1, T, Q>(
+			scalar ^ v.x);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+	{
+		return vec<1, T, Q>(
+			v1.x ^ v2.x);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v, T scalar)
+	{
+		return vec<1, T, Q>(
+			static_cast<T>(v.x << scalar));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(T scalar, vec<1, T, Q> const& v)
+	{
+		return vec<1, T, Q>(
+			static_cast<T>(scalar << v.x));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+	{
+		return vec<1, T, Q>(
+			static_cast<T>(v1.x << v2.x));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v, T scalar)
+	{
+		return vec<1, T, Q>(
+			static_cast<T>(v.x >> scalar));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(T scalar, vec<1, T, Q> const& v)
+	{
+		return vec<1, T, Q>(
+			static_cast<T>(scalar >> v.x));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+	{
+		return vec<1, T, Q>(
+			static_cast<T>(v1.x >> v2.x));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator~(vec<1, T, Q> const& v)
+	{
+		return vec<1, T, Q>(
+			~v.x);
+	}
+
+	// -- Boolean operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+	{
+		return detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.x, v2.x);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2)
+	{
+		return !(v1 == v2);
+	}
+
+	template<qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, bool, Q> operator&&(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2)
+	{
+		return vec<1, bool, Q>(v1.x && v2.x);
+	}
+
+	template<qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, bool, Q> operator||(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2)
+	{
+		return vec<1, bool, Q>(v1.x || v2.x);
+	}
+}//namespace glm
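A minimal usage sketch of the componentwise operators these vendored headers define (assuming the vendored GLM is reachable on the include path as <glm/glm.hpp>; the include path and build wiring are assumptions, not something this patch sets up):

    // Sketch only: exercises the componentwise vec operators defined above.
    #include <cassert>
    #include <glm/glm.hpp>

    int main() {
        glm::vec2 a(1.0f, 2.0f);
        glm::vec2 b(3.0f, 4.0f);
        glm::vec2 c = a + b;                  // componentwise add: (4, 6)
        c *= 2.0f;                            // scalar is broadcast: (8, 12)
        assert(c == glm::vec2(8.0f, 12.0f));  // operator== compares all components

        glm::ivec2 m(7, 9);
        m %= 4;                               // %, &, |, ^, <<, >> exist for integer vectors
        assert(m == glm::ivec2(3, 1));
        return 0;
    }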
ignored "-Wnested-anon-types" +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +# endif +# endif + +# if GLM_CONFIG_XYZW_ONLY + T x, y; +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, Q, x, y) +# endif//GLM_CONFIG_SWIZZLE +# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE + union + { + struct{ T x, y; }; + struct{ T r, g; }; + struct{ T s, t; }; + + typename detail::storage<2, T, detail::is_aligned::value>::type data; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + GLM_SWIZZLE2_2_MEMBERS(T, Q, x, y) + GLM_SWIZZLE2_2_MEMBERS(T, Q, r, g) + GLM_SWIZZLE2_2_MEMBERS(T, Q, s, t) + GLM_SWIZZLE2_3_MEMBERS(T, Q, x, y) + GLM_SWIZZLE2_3_MEMBERS(T, Q, r, g) + GLM_SWIZZLE2_3_MEMBERS(T, Q, s, t) + GLM_SWIZZLE2_4_MEMBERS(T, Q, x, y) + GLM_SWIZZLE2_4_MEMBERS(T, Q, r, g) + GLM_SWIZZLE2_4_MEMBERS(T, Q, s, t) +# endif + }; +# else + union {T x, r, s;}; + union {T y, g, t;}; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, Q) +# endif//GLM_CONFIG_SWIZZLE +# endif + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif +# endif + + // -- Component accesses -- + + /// Return the count of components of the vector + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;} + + GLM_FUNC_DECL GLM_CONSTEXPR T& operator[](length_type i); + GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; + + // -- Implicit basic constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, T, P> const& v); + + // -- Explicit basic constructors -- + + GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); + GLM_FUNC_DECL GLM_CONSTEXPR vec(T x, T y); + + // -- Conversion constructors -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(A x, B y); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, Q> const& x, B y); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(A x, vec<1, B, Q> const& y); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, Q> const& x, vec<1, B, Q> const& y); + + // -- Conversion vector constructors -- + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<2, U, P> const& v); + + // -- Swizzle constructors -- +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1,-1,-2> const& that) + { + *this = that(); + } +# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + + // -- 
+		// -- Unary arithmetic operators --
+
+		GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator=(vec const& v) GLM_DEFAULT;
+
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator=(vec<2, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(U scalar);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(vec<1, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(vec<2, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(U scalar);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(vec<1, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(vec<2, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(U scalar);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(vec<1, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(vec<2, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(U scalar);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(vec<1, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(vec<2, U, Q> const& v);
+
+		// -- Increment and decrement operators --
+
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator++();
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator--();
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator++(int);
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator--(int);
+
+		// -- Unary bit operators --
+
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(U scalar);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(vec<1, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(vec<2, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(U scalar);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(vec<1, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(vec<2, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(U scalar);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(vec<1, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(vec<2, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(U scalar);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(vec<1, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(vec<2, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(U scalar);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(vec<1, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(vec<2, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(U scalar);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(vec<1, U, Q> const& v);
+		template<typename U>
+		GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(vec<2, U, Q> const& v);
+	};
+
+	// -- Unary operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v);
+
+	// -- Binary operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v, T scalar);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q>
operator+(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + 
template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator~(vec<2, T, Q> const& v); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, bool, Q> operator&&(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, bool, Q> operator||(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_vec2.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec2.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec2.inl new file mode 100644 index 000000000000..24850965b510 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec2.inl @@ -0,0 +1,915 @@ +/// @ref core + +#include "./compute_vector_relational.hpp" + +namespace glm +{ + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec() +# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE + : x(0), y(0) +# endif + {} +# endif + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, T, Q> const& v) + : x(v.x), y(v.y) + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, T, P> const& v) + : x(v.x), y(v.y) + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(T scalar) + : x(scalar), y(scalar) + {} + + 
template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(T _x, T _y) + : x(_x), y(_y) + {} + + // -- Conversion scalar constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(A _x, B _y) + : x(static_cast(_x)) + , y(static_cast(_y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, A, Q> const& _x, B _y) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(A _x, vec<1, B, Q> const& _y) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, A, Q> const& _x, vec<1, B, Q> const& _y) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + {} + + // -- Conversion vector constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<3, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<4, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + {} + + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<2, T, Q>::operator[](typename vec<2, T, Q>::length_type i) + { + assert(i >= 0 && i < this->length()); + switch(i) + { + default: + case 0: + return x; + case 1: + return y; + } + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<2, T, Q>::operator[](typename vec<2, T, Q>::length_type i) const + { + assert(i >= 0 && i < this->length()); + switch(i) + { + default: + case 0: + return x; + case 1: + return y; + } + } + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator=(vec<2, T, Q> const& v) + { + this->x = v.x; + this->y = v.y; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator=(vec<2, U, Q> const& v) + { + this->x = static_cast(v.x); + this->y = static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(U scalar) + { + this->x += static_cast(scalar); + this->y += static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(vec<1, U, Q> const& v) + { + this->x += static_cast(v.x); + this->y += static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(vec<2, U, Q> const& v) + { + this->x += static_cast(v.x); + this->y += static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(U scalar) + { + this->x -= static_cast(scalar); + this->y -= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(vec<1, U, Q> const& v) + { + this->x -= static_cast(v.x); + this->y -= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, 
Q>::operator-=(vec<2, U, Q> const& v) + { + this->x -= static_cast(v.x); + this->y -= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(U scalar) + { + this->x *= static_cast(scalar); + this->y *= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(vec<1, U, Q> const& v) + { + this->x *= static_cast(v.x); + this->y *= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(vec<2, U, Q> const& v) + { + this->x *= static_cast(v.x); + this->y *= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(U scalar) + { + this->x /= static_cast(scalar); + this->y /= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(vec<1, U, Q> const& v) + { + this->x /= static_cast(v.x); + this->y /= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(vec<2, U, Q> const& v) + { + this->x /= static_cast(v.x); + this->y /= static_cast(v.y); + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator++() + { + ++this->x; + ++this->y; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator--() + { + --this->x; + --this->y; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> vec<2, T, Q>::operator++(int) + { + vec<2, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> vec<2, T, Q>::operator--(int) + { + vec<2, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary bit operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(U scalar) + { + this->x %= static_cast(scalar); + this->y %= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(vec<1, U, Q> const& v) + { + this->x %= static_cast(v.x); + this->y %= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(vec<2, U, Q> const& v) + { + this->x %= static_cast(v.x); + this->y %= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(U scalar) + { + this->x &= static_cast(scalar); + this->y &= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(vec<1, U, Q> const& v) + { + this->x &= static_cast(v.x); + this->y &= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(vec<2, U, Q> const& v) + { + this->x &= static_cast(v.x); + this->y &= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(U scalar) + { + this->x |= static_cast(scalar); + this->y |= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, 
Q>::operator|=(vec<1, U, Q> const& v) + { + this->x |= static_cast(v.x); + this->y |= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(vec<2, U, Q> const& v) + { + this->x |= static_cast(v.x); + this->y |= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(U scalar) + { + this->x ^= static_cast(scalar); + this->y ^= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(vec<1, U, Q> const& v) + { + this->x ^= static_cast(v.x); + this->y ^= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(vec<2, U, Q> const& v) + { + this->x ^= static_cast(v.x); + this->y ^= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(U scalar) + { + this->x <<= static_cast(scalar); + this->y <<= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(vec<1, U, Q> const& v) + { + this->x <<= static_cast(v.x); + this->y <<= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(vec<2, U, Q> const& v) + { + this->x <<= static_cast(v.x); + this->y <<= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(U scalar) + { + this->x >>= static_cast(scalar); + this->y >>= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(vec<1, U, Q> const& v) + { + this->x >>= static_cast(v.x); + this->y >>= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(vec<2, U, Q> const& v) + { + this->x >>= static_cast(v.x); + this->y >>= static_cast(v.y); + return *this; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + -v.x, + -v.y); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x + scalar, + v.y + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x + v2.x, + v1.y + v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar + v.x, + scalar + v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x + v2.x, + v1.x + v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x + v2.x, + v1.y + v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x 
- scalar, + v.y - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x - v2.x, + v1.y - v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar - v.x, + scalar - v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x - v2.x, + v1.x - v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x - v2.x, + v1.y - v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x * scalar, + v.y * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x * v2.x, + v1.y * v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar * v.x, + scalar * v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x * v2.x, + v1.x * v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x * v2.x, + v1.y * v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x / scalar, + v.y / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x / v2.x, + v1.y / v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar / v.x, + scalar / v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x / v2.x, + v1.x / v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x / v2.x, + v1.y / v2.y); + } + + // -- Binary bit operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x % scalar, + v.y % scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x % v2.x, + v1.y % v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar % v.x, + scalar % v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x % v2.x, + v1.x % v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x % v2.x, + v1.y % v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x & 
scalar, + v.y & scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x & v2.x, + v1.y & v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar & v.x, + scalar & v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x & v2.x, + v1.x & v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x & v2.x, + v1.y & v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x | scalar, + v.y | scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x | v2.x, + v1.y | v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar | v.x, + scalar | v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x | v2.x, + v1.x | v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x | v2.x, + v1.y | v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x ^ scalar, + v.y ^ scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x ^ v2.x, + v1.y ^ v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar ^ v.x, + scalar ^ v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x ^ v2.x, + v1.x ^ v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x ^ v2.x, + v1.y ^ v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x << scalar, + v.y << scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x << v2.x, + v1.y << v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar << v.x, + scalar << v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x << v2.x, + v1.x << v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x << v2.x, + v1.y << v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x >> scalar, + v.y >> 
scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x >> v2.x, + v1.y >> v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar >> v.x, + scalar >> v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x >> v2.x, + v1.x >> v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x >> v2.x, + v1.y >> v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator~(vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + ~v.x, + ~v.y); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return + detail::compute_equal::is_iec559>::call(v1.x, v2.x) && + detail::compute_equal::is_iec559>::call(v1.y, v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return !(v1 == v2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, bool, Q> operator&&(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2) + { + return vec<2, bool, Q>(v1.x && v2.x, v1.y && v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, bool, Q> operator||(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2) + { + return vec<2, bool, Q>(v1.x || v2.x, v1.y || v2.y); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec3.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec3.hpp new file mode 100644 index 000000000000..67104800cfd9 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec3.hpp @@ -0,0 +1,435 @@ +/// @ref core +/// @file glm/detail/type_vec3.hpp + +#pragma once + +#include "qualifier.hpp" +#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +# include "_swizzle.hpp" +#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION +# include "_swizzle_func.hpp" +#endif +#include + +namespace glm +{ + template + struct vec<3, T, Q> + { + // -- Implementation detail -- + + typedef T value_type; + typedef vec<3, T, Q> type; + typedef vec<3, bool, Q> bool_type; + + // -- Data -- + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wpedantic" +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +# pragma clang diagnostic ignored "-Wnested-anon-types" +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE +# pragma warning(disable: 4324) // structure was padded due to alignment specifier +# endif +# endif +# endif + +# if GLM_CONFIG_XYZW_ONLY + T x, y, z; +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, Q, x, y, z) +# endif//GLM_CONFIG_SWIZZLE +# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE + union + { + struct{ T x, y, z; }; + struct{ T r, g, b; }; + struct{ T s, t, p; }; + + typename detail::storage<3, T, detail::is_aligned::value>::type data; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + 
GLM_SWIZZLE3_2_MEMBERS(T, Q, x, y, z) + GLM_SWIZZLE3_2_MEMBERS(T, Q, r, g, b) + GLM_SWIZZLE3_2_MEMBERS(T, Q, s, t, p) + GLM_SWIZZLE3_3_MEMBERS(T, Q, x, y, z) + GLM_SWIZZLE3_3_MEMBERS(T, Q, r, g, b) + GLM_SWIZZLE3_3_MEMBERS(T, Q, s, t, p) + GLM_SWIZZLE3_4_MEMBERS(T, Q, x, y, z) + GLM_SWIZZLE3_4_MEMBERS(T, Q, r, g, b) + GLM_SWIZZLE3_4_MEMBERS(T, Q, s, t, p) +# endif + }; +# else + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, Q) +# endif//GLM_CONFIG_SWIZZLE +# endif//GLM_LANG + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif +# endif + + // -- Component accesses -- + + /// Return the count of components of the vector + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 3;} + + GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); + GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; + + // -- Implicit basic constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, T, P> const& v); + + // -- Explicit basic constructors -- + + GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); + GLM_FUNC_DECL GLM_CONSTEXPR vec(T a, T b, T c); + + // -- Conversion scalar constructors -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X x, Y y, Z z); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z); + + // -- Conversion vector constructors -- + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT 
vec(vec<4, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v); + + // -- Swizzle constructors -- +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& that) + { + *this = that(); + } + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& scalar) + { + *this = vec(v(), scalar); + } + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& scalar, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v) + { + *this = vec(scalar, v()); + } +# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + + // -- Unary arithmetic operators -- + + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q>& operator=(vec<3, T, Q> const& v) GLM_DEFAULT; + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator=(vec<3, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(vec<3, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(vec<3, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(vec<3, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(vec<3, U, Q> const& v); + + // -- Increment and decrement operators -- + + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator++(); + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator--(int); + + // -- Unary bit operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(vec<3, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(vec<3, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(vec<3, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(vec<3, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, 
T, Q> & operator<<=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(vec<3, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(vec<3, U, Q> const& v); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> 
const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator~(vec<3, T, Q> const& v); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, bool, Q> operator&&(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, bool, Q> operator||(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_vec3.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec3.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec3.inl new file mode 100644 index 000000000000..5a258d1f9b88 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec3.inl @@ -0,0 +1,1070 @@ +/// @ref core 
+ +#include "compute_vector_relational.hpp" + +namespace glm +{ + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec() +# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE + : x(0), y(0), z(0) +# endif + {} +# endif + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, T, Q> const& v) + : x(v.x), y(v.y), z(v.z) + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, T, P> const& v) + : x(v.x), y(v.y), z(v.z) + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(T scalar) + : x(scalar), y(scalar), z(scalar) + {} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(T _x, T _y, T _z) + : x(_x), y(_y), z(_z) + {} + + // -- Conversion scalar constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.x)) + , z(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, Y _y, Z _z) + : x(static_cast(_x)) + , y(static_cast(_y)) + , z(static_cast(_z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z) + : x(static_cast(_x)) + , y(static_cast(_y)) + , z(static_cast(_z.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_z.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + {} + + // -- Conversion vector constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<2, A, P> const& _xy, B _z) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(A _x, vec<2, B, P> const& _yz) + : x(static_cast(_x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz) + : x(static_cast(_x.x)) + , y(static_cast(_yz.x)) + 
, z(static_cast(_yz.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + , z(static_cast(v.z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<4, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + , z(static_cast(v.z)) + {} + + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<3, T, Q>::operator[](typename vec<3, T, Q>::length_type i) + { + assert(i >= 0 && i < this->length()); + switch(i) + { + default: + case 0: + return x; + case 1: + return y; + case 2: + return z; + } + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<3, T, Q>::operator[](typename vec<3, T, Q>::length_type i) const + { + assert(i >= 0 && i < this->length()); + switch(i) + { + default: + case 0: + return x; + case 1: + return y; + case 2: + return z; + } + } + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>& vec<3, T, Q>::operator=(vec<3, T, Q> const& v) + { + this->x = v.x; + this->y = v.y; + this->z = v.z; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>& vec<3, T, Q>::operator=(vec<3, U, Q> const& v) + { + this->x = static_cast(v.x); + this->y = static_cast(v.y); + this->z = static_cast(v.z); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(U scalar) + { + this->x += static_cast(scalar); + this->y += static_cast(scalar); + this->z += static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(vec<1, U, Q> const& v) + { + this->x += static_cast(v.x); + this->y += static_cast(v.x); + this->z += static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(vec<3, U, Q> const& v) + { + this->x += static_cast(v.x); + this->y += static_cast(v.y); + this->z += static_cast(v.z); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(U scalar) + { + this->x -= static_cast(scalar); + this->y -= static_cast(scalar); + this->z -= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(vec<1, U, Q> const& v) + { + this->x -= static_cast(v.x); + this->y -= static_cast(v.x); + this->z -= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(vec<3, U, Q> const& v) + { + this->x -= static_cast(v.x); + this->y -= static_cast(v.y); + this->z -= static_cast(v.z); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(U scalar) + { + this->x *= static_cast(scalar); + this->y *= static_cast(scalar); + this->z *= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(vec<1, U, Q> const& v) + { + this->x *= static_cast(v.x); + this->y *= static_cast(v.x); + this->z *= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(vec<3, U, Q> const& v) + { + this->x *= static_cast(v.x); + this->y *= 
static_cast(v.y); + this->z *= static_cast(v.z); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(U v) + { + this->x /= static_cast(v); + this->y /= static_cast(v); + this->z /= static_cast(v); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(vec<1, U, Q> const& v) + { + this->x /= static_cast(v.x); + this->y /= static_cast(v.x); + this->z /= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(vec<3, U, Q> const& v) + { + this->x /= static_cast(v.x); + this->y /= static_cast(v.y); + this->z /= static_cast(v.z); + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator++() + { + ++this->x; + ++this->y; + ++this->z; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator--() + { + --this->x; + --this->y; + --this->z; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> vec<3, T, Q>::operator++(int) + { + vec<3, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> vec<3, T, Q>::operator--(int) + { + vec<3, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary bit operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(U scalar) + { + this->x %= scalar; + this->y %= scalar; + this->z %= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(vec<1, U, Q> const& v) + { + this->x %= v.x; + this->y %= v.x; + this->z %= v.x; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(vec<3, U, Q> const& v) + { + this->x %= v.x; + this->y %= v.y; + this->z %= v.z; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(U scalar) + { + this->x &= scalar; + this->y &= scalar; + this->z &= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(vec<1, U, Q> const& v) + { + this->x &= v.x; + this->y &= v.x; + this->z &= v.x; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(vec<3, U, Q> const& v) + { + this->x &= v.x; + this->y &= v.y; + this->z &= v.z; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(U scalar) + { + this->x |= scalar; + this->y |= scalar; + this->z |= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(vec<1, U, Q> const& v) + { + this->x |= v.x; + this->y |= v.x; + this->z |= v.x; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(vec<3, U, Q> const& v) + { + this->x |= v.x; + this->y |= v.y; + this->z |= v.z; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(U scalar) + { + this->x ^= scalar; + this->y ^= scalar; + this->z ^= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(vec<1, U, 
Q> const& v) + { + this->x ^= v.x; + this->y ^= v.x; + this->z ^= v.x; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(vec<3, U, Q> const& v) + { + this->x ^= v.x; + this->y ^= v.y; + this->z ^= v.z; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(U scalar) + { + this->x <<= scalar; + this->y <<= scalar; + this->z <<= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(vec<1, U, Q> const& v) + { + this->x <<= static_cast(v.x); + this->y <<= static_cast(v.x); + this->z <<= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(vec<3, U, Q> const& v) + { + this->x <<= static_cast(v.x); + this->y <<= static_cast(v.y); + this->z <<= static_cast(v.z); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(U scalar) + { + this->x >>= static_cast(scalar); + this->y >>= static_cast(scalar); + this->z >>= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(vec<1, U, Q> const& v) + { + this->x >>= static_cast(v.x); + this->y >>= static_cast(v.x); + this->z >>= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(vec<3, U, Q> const& v) + { + this->x >>= static_cast(v.x); + this->y >>= static_cast(v.y); + this->z >>= static_cast(v.z); + return *this; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + -v.x, + -v.y, + -v.z); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>( + v.x + scalar, + v.y + scalar, + v.z + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>( + v.x + scalar.x, + v.y + scalar.x, + v.z + scalar.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar + v.x, + scalar + v.y, + scalar + v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar.x + v.x, + scalar.x + v.y, + scalar.x + v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>( + v1.x + v2.x, + v1.y + v2.y, + v1.z + v2.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>( + v.x - scalar, + v.y - scalar, + v.z - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>( + v.x - scalar.x, + v.y - scalar.x, + v.z - scalar.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + 
scalar - v.x, + scalar - v.y, + scalar - v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar.x - v.x, + scalar.x - v.y, + scalar.x - v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>( + v1.x - v2.x, + v1.y - v2.y, + v1.z - v2.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>( + v.x * scalar, + v.y * scalar, + v.z * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>( + v.x * scalar.x, + v.y * scalar.x, + v.z * scalar.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar * v.x, + scalar * v.y, + scalar * v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar.x * v.x, + scalar.x * v.y, + scalar.x * v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>( + v1.x * v2.x, + v1.y * v2.y, + v1.z * v2.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>( + v.x / scalar, + v.y / scalar, + v.z / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>( + v.x / scalar.x, + v.y / scalar.x, + v.z / scalar.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar / v.x, + scalar / v.y, + scalar / v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar.x / v.x, + scalar.x / v.y, + scalar.x / v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>( + v1.x / v2.x, + v1.y / v2.y, + v1.z / v2.z); + } + + // -- Binary bit operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>( + v.x % scalar, + v.y % scalar, + v.z % scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>( + v.x % scalar.x, + v.y % scalar.x, + v.z % scalar.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar % v.x, + scalar % v.y, + scalar % v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar.x % v.x, + scalar.x % v.y, + scalar.x % v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>( + v1.x % v2.x, + v1.y % v2.y, + v1.z % v2.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v, T scalar) + { + return vec<3, 
T, Q>( + v.x & scalar, + v.y & scalar, + v.z & scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>( + v.x & scalar.x, + v.y & scalar.x, + v.z & scalar.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar & v.x, + scalar & v.y, + scalar & v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar.x & v.x, + scalar.x & v.y, + scalar.x & v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>( + v1.x & v2.x, + v1.y & v2.y, + v1.z & v2.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>( + v.x | scalar, + v.y | scalar, + v.z | scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>( + v.x | scalar.x, + v.y | scalar.x, + v.z | scalar.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar | v.x, + scalar | v.y, + scalar | v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar.x | v.x, + scalar.x | v.y, + scalar.x | v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>( + v1.x | v2.x, + v1.y | v2.y, + v1.z | v2.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>( + v.x ^ scalar, + v.y ^ scalar, + v.z ^ scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>( + v.x ^ scalar.x, + v.y ^ scalar.x, + v.z ^ scalar.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar ^ v.x, + scalar ^ v.y, + scalar ^ v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar.x ^ v.x, + scalar.x ^ v.y, + scalar.x ^ v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>( + v1.x ^ v2.x, + v1.y ^ v2.y, + v1.z ^ v2.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>( + v.x << scalar, + v.y << scalar, + v.z << scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>( + v.x << scalar.x, + v.y << scalar.x, + v.z << scalar.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + scalar << v.x, + scalar << v.y, + scalar << v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>( 
+			scalar.x << v.x,
+			scalar.x << v.y,
+			scalar.x << v.z);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+	{
+		return vec<3, T, Q>(
+			v1.x << v2.x,
+			v1.y << v2.y,
+			v1.z << v2.z);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, T scalar)
+	{
+		return vec<3, T, Q>(
+			v.x >> scalar,
+			v.y >> scalar,
+			v.z >> scalar);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar)
+	{
+		return vec<3, T, Q>(
+			v.x >> scalar.x,
+			v.y >> scalar.x,
+			v.z >> scalar.x);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(T scalar, vec<3, T, Q> const& v)
+	{
+		return vec<3, T, Q>(
+			scalar >> v.x,
+			scalar >> v.y,
+			scalar >> v.z);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v)
+	{
+		return vec<3, T, Q>(
+			scalar.x >> v.x,
+			scalar.x >> v.y,
+			scalar.x >> v.z);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+	{
+		return vec<3, T, Q>(
+			v1.x >> v2.x,
+			v1.y >> v2.y,
+			v1.z >> v2.z);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator~(vec<3, T, Q> const& v)
+	{
+		return vec<3, T, Q>(
+			~v.x,
+			~v.y,
+			~v.z);
+	}
+
+	// -- Boolean operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+	{
+		return
+			detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.x, v2.x) &&
+			detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.y, v2.y) &&
+			detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.z, v2.z);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2)
+	{
+		return !(v1 == v2);
+	}
+
+	template<qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, bool, Q> operator&&(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2)
+	{
+		return vec<3, bool, Q>(v1.x && v2.x, v1.y && v2.y, v1.z && v2.z);
+	}
+
+	template<qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, bool, Q> operator||(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2)
+	{
+		return vec<3, bool, Q>(v1.x || v2.x, v1.y || v2.y, v1.z || v2.z);
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4.hpp b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4.hpp
new file mode 100644
index 000000000000..601256c37529
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4.hpp
@@ -0,0 +1,508 @@
+/// @ref core
+/// @file glm/detail/type_vec4.hpp
+
+#pragma once
+
+#include "qualifier.hpp"
+#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+#	include "_swizzle.hpp"
+#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
+#	include "_swizzle_func.hpp"
+#endif
+#include <cstddef>
+
+namespace glm
+{
+	template<typename T, qualifier Q>
+	struct vec<4, T, Q>
+	{
+		// -- Implementation detail --
+
+		typedef T value_type;
+		typedef vec<4, T, Q> type;
+		typedef vec<4, bool, Q> bool_type;
+
+		// -- Data --
+
+#		if GLM_SILENT_WARNINGS == GLM_ENABLE
+#			if GLM_COMPILER & GLM_COMPILER_GCC
+#				pragma GCC diagnostic push
+#				pragma GCC diagnostic ignored "-Wpedantic"
+#			elif GLM_COMPILER & GLM_COMPILER_CLANG
+#				pragma clang diagnostic push
+#				pragma clang diagnostic ignored "-Wgnu-anonymous-struct"
+#				pragma clang diagnostic ignored "-Wnested-anon-types"
+#			elif GLM_COMPILER & GLM_COMPILER_VC
+#				pragma warning(push)
+#				pragma warning(disable: 4201)  // nonstandard extension used : nameless 
struct/union +# endif +# endif + +# if GLM_CONFIG_XYZW_ONLY + T x, y, z, w; +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, Q, x, y, z, w) +# endif//GLM_CONFIG_SWIZZLE +# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE + union + { + struct { T x, y, z, w; }; + struct { T r, g, b, a; }; + struct { T s, t, p, q; }; + + typename detail::storage<4, T, detail::is_aligned::value>::type data; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + GLM_SWIZZLE4_2_MEMBERS(T, Q, x, y, z, w) + GLM_SWIZZLE4_2_MEMBERS(T, Q, r, g, b, a) + GLM_SWIZZLE4_2_MEMBERS(T, Q, s, t, p, q) + GLM_SWIZZLE4_3_MEMBERS(T, Q, x, y, z, w) + GLM_SWIZZLE4_3_MEMBERS(T, Q, r, g, b, a) + GLM_SWIZZLE4_3_MEMBERS(T, Q, s, t, p, q) + GLM_SWIZZLE4_4_MEMBERS(T, Q, x, y, z, w) + GLM_SWIZZLE4_4_MEMBERS(T, Q, r, g, b, a) + GLM_SWIZZLE4_4_MEMBERS(T, Q, s, t, p, q) +# endif + }; +# else + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + union { T w, a, q; }; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, Q) +# endif +# endif + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif +# endif + + // -- Component accesses -- + + typedef length_t length_type; + + /// Return the count of components of the vector + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;} + + GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); + GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; + + // -- Implicit basic constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec(vec<4, T, Q> const& v) GLM_DEFAULT; + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<4, T, P> const& v); + + // -- Explicit basic constructors -- + + GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); + GLM_FUNC_DECL GLM_CONSTEXPR vec(T x, T y, T z, T w); + + // -- Conversion scalar constructors -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, Z _z, W _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z, W _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z, W _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, W _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z, W _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, W _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z, vec<1, W, Q> const& _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); + 
template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); + + // -- Conversion vector constructors -- + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z, C _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, C _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z, vec<1, C, P> const& _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, vec<1, C, P> const& _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz, C _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, C _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, B _y, vec<2, C, P> const& _zw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, B _y, vec<2, C, P> const& _zw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, A, P> const& _xyz, B _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, A, P> const& _xyz, vec<1, B, P> const& _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL 
GLM_CONSTEXPR vec(A _x, vec<3, B, P> const& _yzw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<3, B, P> const& _yzw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<2, B, P> const& _zw); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v); + + // -- Swizzle constructors -- +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<4, T, Q, E0, E1, E2, E3> const& that) + { + *this = that(); + } + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, detail::_swizzle<2, T, Q, F0, F1, -1, -2> const& u) + { + *this = vec<4, T, Q>(v(), u()); + } + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, T const& y, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v) + { + *this = vec<4, T, Q>(x, y, v()); + } + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& w) + { + *this = vec<4, T, Q>(x, v(), w); + } + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& z, T const& w) + { + *this = vec<4, T, Q>(v(), z, w); + } + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& v, T const& w) + { + *this = vec<4, T, Q>(v(), w); + } + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& v) + { + *this = vec<4, T, Q>(x, v()); + } +# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + + // -- Unary arithmetic operators -- + + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator=(vec<4, T, Q> const& v) GLM_DEFAULT; + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(vec<4, U, Q> const& v); + + // -- Increment and decrement operators -- + + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator++(); + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator--(int); + + // -- Unary bit operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, 
Q> & operator%=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(vec<4, U, Q> const& v); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> 
operator/(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<1, T, Q> 
const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator~(vec<4, T, Q> const& v); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> operator&&(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> operator||(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_vec4.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4.inl new file mode 100644 index 000000000000..440de5fcb7ea --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4.inl @@ -0,0 +1,1142 @@ +/// @ref core + +#include "compute_vector_relational.hpp" + +namespace glm{ +namespace detail +{ + template + struct compute_vec4_add + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); + } + }; + + template + struct compute_vec4_sub + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); + } + }; + + template + struct compute_vec4_mul + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); + } + }; + + template + struct compute_vec4_div + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); + } + }; + + template + struct compute_vec4_mod + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(a.x % b.x, a.y % b.y, a.z % b.z, a.w % b.w); + } + }; + + template + struct compute_vec4_and + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(a.x & b.x, a.y & b.y, a.z & b.z, a.w & b.w); + } + }; + + template + struct compute_vec4_or + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(a.x | b.x, a.y | b.y, a.z | b.z, a.w | b.w); + } + }; + + template + struct compute_vec4_xor + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w); + } + }; + + template + struct compute_vec4_shift_left + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(a.x << b.x, a.y << b.y, a.z << b.z, a.w << b.w); + } + }; + + template + struct compute_vec4_shift_right + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(a.x >> b.x, a.y >> b.y, a.z >> b.z, a.w >> b.w); + } + }; + + 
template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+	struct compute_vec4_equal
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+		{
+			return
+				detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.x, v2.x) &&
+				detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.y, v2.y) &&
+				detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.z, v2.z) &&
+				detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(v1.w, v2.w);
+		}
+	};
+
+	template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+	struct compute_vec4_nequal
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2)
+		{
+			return !compute_vec4_equal<T, Q, detail::is_int<T>::value, sizeof(T) * 8, detail::is_aligned<Q>::value>::call(v1, v2);
+		}
+	};
+
+	template<typename T, qualifier Q, int IsInt, std::size_t Size, bool Aligned>
+	struct compute_vec4_bitwise_not
+	{
+		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& v)
+		{
+			return vec<4, T, Q>(~v.x, ~v.y, ~v.z, ~v.w);
+		}
+	};
+}//namespace detail
+
+	// -- Implicit basic constructors --
+
+#	if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE
+		template<typename T, qualifier Q>
+		GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec()
+#			if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE
+				: x(0), y(0), z(0), w(0)
+#			endif
+		{}
+#	endif
+
+#	if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
+		template<typename T, qualifier Q>
+		GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, T, Q> const& v)
+			: x(v.x), y(v.y), z(v.z), w(v.w)
+		{}
+#	endif
+
+	template<typename T, qualifier Q>
+	template<qualifier P>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, T, P> const& v)
+		: x(v.x), y(v.y), z(v.z), w(v.w)
+	{}
+
+	// -- Explicit basic constructors --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(T scalar)
+		: x(scalar), y(scalar), z(scalar), w(scalar)
+	{}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(T _x, T _y, T _z, T _w)
+		: x(_x), y(_y), z(_z), w(_w)
+	{}
+
+	// -- Conversion scalar constructors --
+
+	template<typename T, qualifier Q>
+	template<typename U, qualifier P>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, U, P> const& v)
+		: x(static_cast<T>(v.x))
+		, y(static_cast<T>(v.x))
+		, z(static_cast<T>(v.x))
+		, w(static_cast<T>(v.x))
+	{}
+
+	template<typename T, qualifier Q>
+	template<typename X, typename Y, typename Z, typename W>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, Z _z, W _w)
+		: x(static_cast<T>(_x))
+		, y(static_cast<T>(_y))
+		, z(static_cast<T>(_z))
+		, w(static_cast<T>(_w))
+	{}
+
+	template<typename T, qualifier Q>
+	template<typename X, typename Y, typename Z, typename W>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z, W _w)
+		: x(static_cast<T>(_x.x))
+		, y(static_cast<T>(_y))
+		, z(static_cast<T>(_z))
+		, w(static_cast<T>(_w))
+	{}
+
+	template<typename T, qualifier Q>
+	template<typename X, typename Y, typename Z, typename W>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z, W _w)
+		: x(static_cast<T>(_x))
+		, y(static_cast<T>(_y.x))
+		, z(static_cast<T>(_z))
+		, w(static_cast<T>(_w))
+	{}
+
+	template<typename T, qualifier Q>
+	template<typename X, typename Y, typename Z, typename W>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, W _w)
+		: x(static_cast<T>(_x.x))
+		, y(static_cast<T>(_y.x))
+		, z(static_cast<T>(_z))
+		, w(static_cast<T>(_w))
+	{}
+
+	template<typename T, qualifier Q>
+	template<typename X, typename Y, typename Z, typename W>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z, W _w)
+		: x(static_cast<T>(_x))
+		, y(static_cast<T>(_y))
+		, z(static_cast<T>(_z.x))
+		, w(static_cast<T>(_w))
+	{}
+
+	template<typename T, qualifier Q>
+	template<typename X, typename Y, typename Z, typename W>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, W _w)
+		: x(static_cast<T>(_x.x))
+		, y(static_cast<T>(_y))
+		, z(static_cast<T>(_z.x))
+		, w(static_cast<T>(_w))
+	{}
+
+	template<typename T, qualifier Q>
+	template<typename X, typename Y, typename Z, typename W>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w)
+		: x(static_cast<T>(_x))
+		, y(static_cast<T>(_y.x))
+		, 
z(static_cast(_z.x)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z, vec<1, W, Q> const& _w) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_z)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_z)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) + : x(static_cast(_x)) + , y(static_cast(_y)) + , z(static_cast(_z.x)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_z.x)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + , w(static_cast(_w.x)) + {} + + // -- Conversion vector constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, B _z, C _w) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, C _w) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z.x)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, B _z, vec<1, C, P> const& _w) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, vec<1, C, P> const& _w) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z.x)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<2, B, P> const& _yz, C _w) + : x(static_cast(_x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, C _w) + : x(static_cast(_x.x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + , w(static_cast(_w)) + {} + + template + template + 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w) + : x(static_cast(_x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w) + : x(static_cast(_x.x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, B _y, vec<2, C, P> const& _zw) + : x(static_cast(_x)) + , y(static_cast(_y)) + , z(static_cast(_zw.x)) + , w(static_cast(_zw.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, B _y, vec<2, C, P> const& _zw) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_zw.x)) + , w(static_cast(_zw.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_zw.x)) + , w(static_cast(_zw.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_zw.x)) + , w(static_cast(_zw.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<3, A, P> const& _xyz, B _w) + : x(static_cast(_xyz.x)) + , y(static_cast(_xyz.y)) + , z(static_cast(_xyz.z)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<3, A, P> const& _xyz, vec<1, B, P> const& _w) + : x(static_cast(_xyz.x)) + , y(static_cast(_xyz.y)) + , z(static_cast(_xyz.z)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<3, B, P> const& _yzw) + : x(static_cast(_x)) + , y(static_cast(_yzw.x)) + , z(static_cast(_yzw.y)) + , w(static_cast(_yzw.z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<3, B, P> const& _yzw) + : x(static_cast(_x.x)) + , y(static_cast(_yzw.x)) + , z(static_cast(_yzw.y)) + , w(static_cast(_yzw.z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<2, B, P> const& _zw) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_zw.x)) + , w(static_cast(_zw.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + , z(static_cast(v.z)) + , w(static_cast(v.w)) + {} + + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T& vec<4, T, Q>::operator[](typename vec<4, T, Q>::length_type i) + { + assert(i >= 0 && i < this->length()); + switch(i) + { + default: + case 0: + return x; + case 1: + return y; + case 2: + return z; + case 3: + return w; + } + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<4, T, Q>::operator[](typename vec<4, T, Q>::length_type i) const + { + assert(i >= 0 && i < this->length()); + switch(i) + { + default: + case 0: + return x; + case 1: + return y; + case 2: + return z; + case 3: + return w; + } + } + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>& vec<4, T, 
Q>::operator=(vec<4, T, Q> const& v) + { + this->x = v.x; + this->y = v.y; + this->z = v.z; + this->w = v.w; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>& vec<4, T, Q>::operator=(vec<4, U, Q> const& v) + { + this->x = static_cast(v.x); + this->y = static_cast(v.y); + this->z = static_cast(v.z); + this->w = static_cast(v.w); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(U scalar) + { + return (*this = detail::compute_vec4_add::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec4_add::value>::call(*this, vec<4, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec4_add::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(U scalar) + { + return (*this = detail::compute_vec4_sub::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec4_sub::value>::call(*this, vec<4, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec4_sub::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(U scalar) + { + return (*this = detail::compute_vec4_mul::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec4_mul::value>::call(*this, vec<4, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec4_mul::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(U scalar) + { + return (*this = detail::compute_vec4_div::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec4_div::value>::call(*this, vec<4, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec4_div::value>::call(*this, vec<4, T, Q>(v))); + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator++() + { + ++this->x; + ++this->y; + ++this->z; + ++this->w; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator--() + { + --this->x; + --this->y; + --this->z; + --this->w; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> vec<4, T, Q>::operator++(int) + { + vec<4, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> vec<4, T, 
Q>::operator--(int) + { + vec<4, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary bit operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(U scalar) + { + return (*this = detail::compute_vec4_mod::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec4_mod::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec4_mod::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(U scalar) + { + return (*this = detail::compute_vec4_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec4_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec4_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(U scalar) + { + return (*this = detail::compute_vec4_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec4_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec4_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(U scalar) + { + return (*this = detail::compute_vec4_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec4_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec4_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(U scalar) + { + return (*this = detail::compute_vec4_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec4_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + 
template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec4_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(U scalar) + { + return (*this = detail::compute_vec4_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec4_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec4_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + // -- Unary constant operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v) + { + return vec<4, T, Q>(0) -= v; + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) += scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) += v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(v) += scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v2) += v1; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) += v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) -= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) -= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) -= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) -= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) -= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) *= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) *= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(v) *= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v2) *= v1; + } + + template + 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) *= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) /= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) /= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) /= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) /= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) /= v2; + } + + // -- Binary bit operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) %= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) %= v2.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) %= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar.x) %= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) %= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) &= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<4, T, Q>(v) &= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) &= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) &= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) &= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) |= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) |= v2.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) |= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) |= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) |= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) ^= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, 
vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) ^= v2.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) ^= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) ^= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) ^= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) <<= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) <<= v2.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) <<= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) <<= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) <<= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) >>= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) >>= v2.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) >>= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) >>= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) >>= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator~(vec<4, T, Q> const& v) + { + return detail::compute_vec4_bitwise_not::value, sizeof(T) * 8, detail::is_aligned::value>::call(v); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return detail::compute_vec4_equal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return detail::compute_vec4_nequal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> operator&&(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2) + { + return vec<4, bool, Q>(v1.x && v2.x, v1.y && v2.y, v1.z && v2.z, v1.w && v2.w); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> operator||(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2) + { + return vec<4, bool, Q>(v1.x || v2.x, v1.y || v2.y, v1.z || v2.z, v1.w || v2.w); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "type_vec4_simd.inl" +#endif diff --git a/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4_simd.inl new file mode 100644 index 000000000000..816ef45bf0d6 --- /dev/null +++ 
b/thirdparty/manifold/thirdparty/glm/glm/detail/type_vec4_simd.inl @@ -0,0 +1,788 @@ +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm { + namespace detail + { +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + template + struct _swizzle_base1<4, float, Q, E0, E1, E2, E3, true> : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec<4, float, Q> operator ()() const + { + __m128 data = *reinterpret_cast<__m128 const*>(&this->_buffer); + + vec<4, float, Q> Result; +# if GLM_ARCH & GLM_ARCH_AVX_BIT + Result.data = _mm_permute_ps(data, _MM_SHUFFLE(E3, E2, E1, E0)); +# else + Result.data = _mm_shuffle_ps(data, data, _MM_SHUFFLE(E3, E2, E1, E0)); +# endif + return Result; + } + }; + + template + struct _swizzle_base1<4, int, Q, E0, E1, E2, E3, true> : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec<4, int, Q> operator ()() const + { + __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer); + + vec<4, int, Q> Result; + Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0)); + return Result; + } + }; + + template + struct _swizzle_base1<4, uint, Q, E0, E1, E2, E3, true> : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec<4, uint, Q> operator ()() const + { + __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer); + + vec<4, uint, Q> Result; + Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0)); + return Result; + } + }; +# endif// GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + + template + struct compute_vec4_add + { + static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) + { + vec<4, float, Q> Result; + Result.data = _mm_add_ps(a.data, b.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_vec4_add + { + static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) + { + vec<4, double, Q> Result; + Result.data = _mm256_add_pd(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_vec4_sub + { + static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) + { + vec<4, float, Q> Result; + Result.data = _mm_sub_ps(a.data, b.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_vec4_sub + { + static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) + { + vec<4, double, Q> Result; + Result.data = _mm256_sub_pd(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_vec4_mul + { + static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) + { + vec<4, float, Q> Result; + Result.data = _mm_mul_ps(a.data, b.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_vec4_mul + { + static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) + { + vec<4, double, Q> Result; + Result.data = _mm256_mul_pd(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_vec4_div + { + static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) + { + vec<4, float, Q> Result; + Result.data = _mm_div_ps(a.data, b.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_vec4_div + { + static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) + { + vec<4, double, Q> Result; + Result.data = _mm256_div_pd(a.data, b.data); + return Result; + } + }; +# endif + + template<> + struct compute_vec4_div + { + static vec<4, float, aligned_lowp> 
call(vec<4, float, aligned_lowp> const& a, vec<4, float, aligned_lowp> const& b) + { + vec<4, float, aligned_lowp> Result; + Result.data = _mm_mul_ps(a.data, _mm_rcp_ps(b.data)); + return Result; + } + }; + + template + struct compute_vec4_and + { + static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> Result; + Result.data = _mm_and_si128(a.data, b.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + template + struct compute_vec4_and + { + static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> Result; + Result.data = _mm256_and_si256(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_vec4_or + { + static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> Result; + Result.data = _mm_or_si128(a.data, b.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + template + struct compute_vec4_or + { + static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> Result; + Result.data = _mm256_or_si256(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_vec4_xor + { + static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> Result; + Result.data = _mm_xor_si128(a.data, b.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + template + struct compute_vec4_xor + { + static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> Result; + Result.data = _mm256_xor_si256(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_vec4_shift_left + { + static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> Result; + Result.data = _mm_sll_epi32(a.data, b.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + template + struct compute_vec4_shift_left + { + static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> Result; + Result.data = _mm256_sll_epi64(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_vec4_shift_right + { + static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> Result; + Result.data = _mm_srl_epi32(a.data, b.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + template + struct compute_vec4_shift_right + { + static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + vec<4, T, Q> Result; + Result.data = _mm256_srl_epi64(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_vec4_bitwise_not + { + static vec<4, T, Q> call(vec<4, T, Q> const& v) + { + vec<4, T, Q> Result; + Result.data = _mm_xor_si128(v.data, _mm_set1_epi32(-1)); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + template + struct compute_vec4_bitwise_not + { + static vec<4, T, Q> call(vec<4, T, Q> const& v) + { + vec<4, T, Q> Result; + Result.data = _mm256_xor_si256(v.data, _mm_set1_epi32(-1)); + return Result; + } + }; +# endif + + template + struct compute_vec4_equal + { + static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) + { + return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) == 0; + } + }; + +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + template + struct compute_vec4_equal + { + static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) + { + //return _mm_movemask_epi8(_mm_cmpeq_epi32(v1.data, v2.data)) != 0; + __m128i neq = 
_mm_xor_si128(v1.data, v2.data);
+				// equal vectors XOR to all-zero bits, and _mm_test_all_zeros then returns 1
+				return _mm_test_all_zeros(neq, neq) != 0;
+			}
+		};
+#	endif
+
+		template<qualifier Q>
+		struct compute_vec4_nequal<float, Q, false, 32, true>
+		{
+			static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+			{
+				return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) != 0;
+			}
+		};
+
+#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
+		template<qualifier Q>
+		struct compute_vec4_nequal<int, Q, true, 32, true>
+		{
+			static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+			{
+				//return _mm_movemask_epi8(_mm_cmpneq_epi32(v1.data, v2.data)) != 0;
+				__m128i neq = _mm_xor_si128(v1.data, v2.data);
+				// any non-zero XOR bit means at least one component differs
+				return _mm_test_all_zeros(neq, neq) == 0;
+			}
+		};
+#	endif
+	}//namespace detail
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _s) :
+		data(_mm_set1_ps(_s))
+	{}
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _s) :
+		data(_mm_set1_ps(_s))
+	{}
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(float _s) :
+		data(_mm_set1_ps(_s))
+	{}
+
+#	if GLM_ARCH & GLM_ARCH_AVX_BIT
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_lowp>::vec(double _s) :
+		data(_mm256_set1_pd(_s))
+	{}
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_mediump>::vec(double _s) :
+		data(_mm256_set1_pd(_s))
+	{}
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_highp>::vec(double _s) :
+		data(_mm256_set1_pd(_s))
+	{}
+#	endif
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _s) :
+		data(_mm_set1_epi32(_s))
+	{}
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _s) :
+		data(_mm_set1_epi32(_s))
+	{}
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _s) :
+		data(_mm_set1_epi32(_s))
+	{}
+
+#	if GLM_ARCH & GLM_ARCH_AVX2_BIT
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_lowp>::vec(detail::int64 _s) :
+		data(_mm256_set1_epi64x(_s))
+	{}
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_mediump>::vec(detail::int64 _s) :
+		data(_mm256_set1_epi64x(_s))
+	{}
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_highp>::vec(detail::int64 _s) :
+		data(_mm256_set1_epi64x(_s))
+	{}
+#	endif
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _x, float _y, float _z, float _w) :
+		data(_mm_set_ps(_w, _z, _y, _x))
+	{}
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _x, float _y, float _z, float _w) :
+		data(_mm_set_ps(_w, _z, _y, _x))
+	{}
+
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(float _x, float _y, float _z, float _w) :
+		data(_mm_set_ps(_w, _z, _y, _x))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _x, int _y, int _z, int _w) :
+		data(_mm_set_epi32(_w, _z, _y, _x))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _x, int _y, int _z, int _w) :
+		data(_mm_set_epi32(_w, _z, _y, _x))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _x, int _y, int _z, int _w) :
+		data(_mm_set_epi32(_w, _z, _y, _x))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(int _x, int _y, int _z, int _w) :
+		data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x)))
+	{}
+
+	template<>
template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(int _x, int _y, int _z, int _w) : + data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x))) + {} + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(int _x, int _y, int _z, int _w) : + data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x))) + {} +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT + +#if GLM_ARCH & GLM_ARCH_NEON_BIT +namespace glm { + namespace detail { + + template + struct compute_vec4_add + { + static + vec<4, float, Q> + call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) + { + vec<4, float, Q> Result; + Result.data = vaddq_f32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec4_add + { + static + vec<4, uint, Q> + call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b) + { + vec<4, uint, Q> Result; + Result.data = vaddq_u32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec4_add + { + static + vec<4, int, Q> + call(vec<4, int, Q> const& a, vec<4, int, Q> const& b) + { + vec<4, int, Q> Result; + Result.data = vaddq_s32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec4_sub + { + static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) + { + vec<4, float, Q> Result; + Result.data = vsubq_f32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec4_sub + { + static vec<4, uint, Q> call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b) + { + vec<4, uint, Q> Result; + Result.data = vsubq_u32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec4_sub + { + static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b) + { + vec<4, int, Q> Result; + Result.data = vsubq_s32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec4_mul + { + static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) + { + vec<4, float, Q> Result; + Result.data = vmulq_f32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec4_mul + { + static vec<4, uint, Q> call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b) + { + vec<4, uint, Q> Result; + Result.data = vmulq_u32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec4_mul + { + static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b) + { + vec<4, int, Q> Result; + Result.data = vmulq_s32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec4_div + { + static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) + { + vec<4, float, Q> Result; +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + Result.data = vdivq_f32(a.data, b.data); +#else + /* Arm assembler reference: + * + * The Newton-Raphson iteration: x[n+1] = x[n] * (2 - d * x[n]) + * converges to (1/d) if x0 is the result of VRECPE applied to d. + * + * Note: The precision usually improves with two interactions, but more than two iterations are not helpful. 
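+			 * A worked illustration (approximate values, not from the ARM reference):
+			 * for d = 5, VRECPE may return x0 ~= 0.1992 (about 8 correct bits).
+			 * One step gives x1 = x0 * (2 - d * x0) = 0.1992 * 1.004 ~= 0.199997,
+			 * and a second step reaches 0.2 to full single precision, which is why
+			 * vrecpsq_f32 is applied twice below before the final vmulq_f32.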
*/ + float32x4_t x = vrecpeq_f32(b.data); + x = vmulq_f32(vrecpsq_f32(b.data, x), x); + x = vmulq_f32(vrecpsq_f32(b.data, x), x); + Result.data = vmulq_f32(a.data, x); +#endif + return Result; + } + }; + + template + struct compute_vec4_equal + { + static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) + { + uint32x4_t cmp = vceqq_f32(v1.data, v2.data); +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + cmp = vpminq_u32(cmp, cmp); + cmp = vpminq_u32(cmp, cmp); + uint32_t r = cmp[0]; +#else + uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp)); + cmpx2 = vpmin_u32(cmpx2, cmpx2); + uint32_t r = cmpx2[0]; +#endif + return r == ~0u; + } + }; + + template + struct compute_vec4_equal + { + static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) + { + uint32x4_t cmp = vceqq_u32(v1.data, v2.data); +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + cmp = vpminq_u32(cmp, cmp); + cmp = vpminq_u32(cmp, cmp); + uint32_t r = cmp[0]; +#else + uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp)); + cmpx2 = vpmin_u32(cmpx2, cmpx2); + uint32_t r = cmpx2[0]; +#endif + return r == ~0u; + } + }; + + template + struct compute_vec4_equal + { + static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) + { + uint32x4_t cmp = vceqq_s32(v1.data, v2.data); +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + cmp = vpminq_u32(cmp, cmp); + cmp = vpminq_u32(cmp, cmp); + uint32_t r = cmp[0]; +#else + uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp)); + cmpx2 = vpmin_u32(cmpx2, cmpx2); + uint32_t r = cmpx2[0]; +#endif + return r == ~0u; + } + }; + + template + struct compute_vec4_nequal + { + static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) + { + return !compute_vec4_equal::call(v1, v2); + } + }; + + template + struct compute_vec4_nequal + { + static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) + { + return !compute_vec4_equal::call(v1, v2); + } + }; + + template + struct compute_vec4_nequal + { + static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) + { + return !compute_vec4_equal::call(v1, v2); + } + }; + + }//namespace detail + +#if !GLM_CONFIG_XYZW_ONLY + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _s) : + data(vdupq_n_f32(_s)) + {} + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _s) : + data(vdupq_n_f32(_s)) + {} + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(float _s) : + data(vdupq_n_f32(_s)) + {} + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _s) : + data(vdupq_n_s32(_s)) + {} + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _s) : + data(vdupq_n_s32(_s)) + {} + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _s) : + data(vdupq_n_s32(_s)) + {} + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_lowp>::vec(uint _s) : + data(vdupq_n_u32(_s)) + {} + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_mediump>::vec(uint _s) : + data(vdupq_n_u32(_s)) + {} + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_highp>::vec(uint _s) : + data(vdupq_n_u32(_s)) + {} + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, float, aligned_highp>& rhs) : + data(rhs.data) + {} + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, int, 
aligned_highp>& rhs) :
+		data(vcvtq_f32_s32(rhs.data))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, uint, aligned_highp>& rhs) :
+		data(vcvtq_f32_u32(rhs.data))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(int _x, int _y, int _z, int _w) :
+		data(vcvtq_f32_s32(vec<4, int, aligned_lowp>(_x, _y, _z, _w).data))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(int _x, int _y, int _z, int _w) :
+		data(vcvtq_f32_s32(vec<4, int, aligned_mediump>(_x, _y, _z, _w).data))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(int _x, int _y, int _z, int _w) :
+		data(vcvtq_f32_s32(vec<4, int, aligned_highp>(_x, _y, _z, _w).data))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(uint _x, uint _y, uint _z, uint _w) :
+		data(vcvtq_f32_u32(vec<4, uint, aligned_lowp>(_x, _y, _z, _w).data))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(uint _x, uint _y, uint _z, uint _w) :
+		data(vcvtq_f32_u32(vec<4, uint, aligned_mediump>(_x, _y, _z, _w).data))
+	{}
+
+	template<>
+	template<>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(uint _x, uint _y, uint _z, uint _w) :
+		data(vcvtq_f32_u32(vec<4, uint, aligned_highp>(_x, _y, _z, _w).data))
+	{}
+
+#endif
+}//namespace glm
+
+#endif
diff --git a/thirdparty/manifold/thirdparty/glm/glm/exponential.hpp b/thirdparty/manifold/thirdparty/glm/glm/exponential.hpp
new file mode 100644
index 000000000000..1614f7695dbd
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/exponential.hpp
@@ -0,0 +1,110 @@
+/// @ref core
+/// @file glm/exponential.hpp
+///
+/// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions
+///
+/// @defgroup core_func_exponential Exponential functions
+/// @ingroup core
+///
+/// Provides GLSL exponential functions
+///
+/// These all operate component-wise. The description is per component.
+///
+/// Include <glm/exponential.hpp> to use these core features.
+
+#pragma once
+
+#include "detail/type_vec1.hpp"
+#include "detail/type_vec2.hpp"
+#include "detail/type_vec3.hpp"
+#include "detail/type_vec4.hpp"
+#include <cmath>
+
+namespace glm
+{
+	/// @addtogroup core_func_exponential
+	/// @{
+
+	/// Returns 'base' raised to the power 'exponent'.
+	///
+	/// @param base Floating point value. pow function is defined for input values of 'base' defined in the range (inf-, inf+) in the limit of the type qualifier.
+	/// @param exponent Floating point value representing the 'exponent'.
+	///
+	/// @see GLSL pow man page
+	/// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> pow(vec<L, T, Q> const& base, vec<L, T, Q> const& exponent);
+
+	/// Returns the natural exponentiation of v, i.e., e^v.
+	///
+	/// @param v exp function is defined for input values of v defined in the range (inf-, inf+) in the limit of the type qualifier.
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Floating-point scalar types.
+	///
+	/// @see GLSL exp man page
+	/// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> exp(vec<L, T, Q> const& v);
+
+	/// Returns the natural logarithm of v, i.e.,
+	/// returns the value y which satisfies the equation x = e^y.
+	/// Results are undefined if v <= 0.
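+	/// A small component-wise usage sketch (values approximate):
+	/// @code
+	/// glm::vec4 v(1.0f, 2.718282f, 4.0f, 8.0f);
+	/// glm::vec4 l = glm::log(v); // ~(0.0, 1.0, 1.386, 2.079)
+	/// @endcode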
+ /// + /// @param v log function is defined for input values of v defined in the range (0, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL log man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec log(vec const& v); + + /// Returns 2 raised to the v power. + /// + /// @param v exp2 function is defined for input values of v defined in the range (inf-, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL exp2 man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec exp2(vec const& v); + + /// Returns the base 2 log of x, i.e., returns the value y, + /// which satisfies the equation x = 2 ^ y. + /// + /// @param v log2 function is defined for input values of v defined in the range (0, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL log2 man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec log2(vec const& v); + + /// Returns the positive square root of v. + /// + /// @param v sqrt function is defined for input values of v defined in the range [0, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL sqrt man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec sqrt(vec const& v); + + /// Returns the reciprocal of the positive square root of v. + /// + /// @param v inversesqrt function is defined for input values of v defined in the range [0, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. 
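+	/// A small usage sketch (values approximate); component-wise this equals 1 / sqrt(v):
+	/// @code
+	/// glm::vec4 inv = glm::inversesqrt(glm::vec4(1.0f, 4.0f, 16.0f, 64.0f));
+	/// // inv ~ (1.0, 0.5, 0.25, 0.125)
+	/// @endcode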
+ /// + /// @see GLSL inversesqrt man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec inversesqrt(vec const& v); + + /// @} +}//namespace glm + +#include "detail/func_exponential.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext.hpp new file mode 100644 index 000000000000..164dc2a18cc4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext.hpp @@ -0,0 +1,266 @@ +/// @file glm/ext.hpp +/// +/// @ref core (Dependence) + +#include "detail/setup.hpp" + +#pragma once + +#include "glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_EXT_INCLUDED_DISPLAYED) +# define GLM_MESSAGE_EXT_INCLUDED_DISPLAYED +# pragma message("GLM: All extensions included (not recommended)") +#endif//GLM_MESSAGES + +#include "./ext/matrix_clip_space.hpp" +#include "./ext/matrix_common.hpp" + +#include "./ext/matrix_double2x2.hpp" +#include "./ext/matrix_double2x2_precision.hpp" +#include "./ext/matrix_double2x3.hpp" +#include "./ext/matrix_double2x3_precision.hpp" +#include "./ext/matrix_double2x4.hpp" +#include "./ext/matrix_double2x4_precision.hpp" +#include "./ext/matrix_double3x2.hpp" +#include "./ext/matrix_double3x2_precision.hpp" +#include "./ext/matrix_double3x3.hpp" +#include "./ext/matrix_double3x3_precision.hpp" +#include "./ext/matrix_double3x4.hpp" +#include "./ext/matrix_double3x4_precision.hpp" +#include "./ext/matrix_double4x2.hpp" +#include "./ext/matrix_double4x2_precision.hpp" +#include "./ext/matrix_double4x3.hpp" +#include "./ext/matrix_double4x3_precision.hpp" +#include "./ext/matrix_double4x4.hpp" +#include "./ext/matrix_double4x4_precision.hpp" + +#include "./ext/matrix_float2x2.hpp" +#include "./ext/matrix_float2x2_precision.hpp" +#include "./ext/matrix_float2x3.hpp" +#include "./ext/matrix_float2x3_precision.hpp" +#include "./ext/matrix_float2x4.hpp" +#include "./ext/matrix_float2x4_precision.hpp" +#include "./ext/matrix_float3x2.hpp" +#include "./ext/matrix_float3x2_precision.hpp" +#include "./ext/matrix_float3x3.hpp" +#include "./ext/matrix_float3x3_precision.hpp" +#include "./ext/matrix_float3x4.hpp" +#include "./ext/matrix_float3x4_precision.hpp" +#include "./ext/matrix_float4x2.hpp" +#include "./ext/matrix_float4x2_precision.hpp" +#include "./ext/matrix_float4x3.hpp" +#include "./ext/matrix_float4x3_precision.hpp" +#include "./ext/matrix_float4x4.hpp" +#include "./ext/matrix_float4x4_precision.hpp" + +#include "./ext/matrix_int2x2.hpp" +#include "./ext/matrix_int2x2_sized.hpp" +#include "./ext/matrix_int2x3.hpp" +#include "./ext/matrix_int2x3_sized.hpp" +#include "./ext/matrix_int2x4.hpp" +#include "./ext/matrix_int2x4_sized.hpp" +#include "./ext/matrix_int3x2.hpp" +#include "./ext/matrix_int3x2_sized.hpp" +#include "./ext/matrix_int3x3.hpp" +#include "./ext/matrix_int3x3_sized.hpp" +#include "./ext/matrix_int3x4.hpp" +#include "./ext/matrix_int3x4_sized.hpp" +#include "./ext/matrix_int4x2.hpp" +#include "./ext/matrix_int4x2_sized.hpp" +#include "./ext/matrix_int4x3.hpp" +#include "./ext/matrix_int4x3_sized.hpp" +#include "./ext/matrix_int4x4.hpp" +#include "./ext/matrix_int4x4_sized.hpp" + +#include "./ext/matrix_uint2x2.hpp" +#include "./ext/matrix_uint2x2_sized.hpp" +#include "./ext/matrix_uint2x3.hpp" +#include "./ext/matrix_uint2x3_sized.hpp" +#include "./ext/matrix_uint2x4.hpp" +#include "./ext/matrix_uint2x4_sized.hpp" +#include "./ext/matrix_uint3x2.hpp" +#include "./ext/matrix_uint3x2_sized.hpp" +#include "./ext/matrix_uint3x3.hpp" +#include 
"./ext/matrix_uint3x3_sized.hpp" +#include "./ext/matrix_uint3x4.hpp" +#include "./ext/matrix_uint3x4_sized.hpp" +#include "./ext/matrix_uint4x2.hpp" +#include "./ext/matrix_uint4x2_sized.hpp" +#include "./ext/matrix_uint4x3.hpp" +#include "./ext/matrix_uint4x3_sized.hpp" +#include "./ext/matrix_uint4x4.hpp" +#include "./ext/matrix_uint4x4_sized.hpp" + +#include "./ext/matrix_projection.hpp" +#include "./ext/matrix_relational.hpp" +#include "./ext/matrix_transform.hpp" + +#include "./ext/quaternion_common.hpp" +#include "./ext/quaternion_double.hpp" +#include "./ext/quaternion_double_precision.hpp" +#include "./ext/quaternion_float.hpp" +#include "./ext/quaternion_float_precision.hpp" +#include "./ext/quaternion_exponential.hpp" +#include "./ext/quaternion_geometric.hpp" +#include "./ext/quaternion_relational.hpp" +#include "./ext/quaternion_transform.hpp" +#include "./ext/quaternion_trigonometric.hpp" + +#include "./ext/scalar_common.hpp" +#include "./ext/scalar_constants.hpp" +#include "./ext/scalar_integer.hpp" +#include "./ext/scalar_packing.hpp" +#include "./ext/scalar_reciprocal.hpp" +#include "./ext/scalar_relational.hpp" +#include "./ext/scalar_ulp.hpp" + +#include "./ext/scalar_int_sized.hpp" +#include "./ext/scalar_uint_sized.hpp" + +#include "./ext/vector_common.hpp" +#include "./ext/vector_integer.hpp" +#include "./ext/vector_packing.hpp" +#include "./ext/vector_reciprocal.hpp" +#include "./ext/vector_relational.hpp" +#include "./ext/vector_ulp.hpp" + +#include "./ext/vector_bool1.hpp" +#include "./ext/vector_bool1_precision.hpp" +#include "./ext/vector_bool2.hpp" +#include "./ext/vector_bool2_precision.hpp" +#include "./ext/vector_bool3.hpp" +#include "./ext/vector_bool3_precision.hpp" +#include "./ext/vector_bool4.hpp" +#include "./ext/vector_bool4_precision.hpp" + +#include "./ext/vector_double1.hpp" +#include "./ext/vector_double1_precision.hpp" +#include "./ext/vector_double2.hpp" +#include "./ext/vector_double2_precision.hpp" +#include "./ext/vector_double3.hpp" +#include "./ext/vector_double3_precision.hpp" +#include "./ext/vector_double4.hpp" +#include "./ext/vector_double4_precision.hpp" + +#include "./ext/vector_float1.hpp" +#include "./ext/vector_float1_precision.hpp" +#include "./ext/vector_float2.hpp" +#include "./ext/vector_float2_precision.hpp" +#include "./ext/vector_float3.hpp" +#include "./ext/vector_float3_precision.hpp" +#include "./ext/vector_float4.hpp" +#include "./ext/vector_float4_precision.hpp" + +#include "./ext/vector_int1.hpp" +#include "./ext/vector_int1_sized.hpp" +#include "./ext/vector_int2.hpp" +#include "./ext/vector_int2_sized.hpp" +#include "./ext/vector_int3.hpp" +#include "./ext/vector_int3_sized.hpp" +#include "./ext/vector_int4.hpp" +#include "./ext/vector_int4_sized.hpp" + +#include "./ext/vector_uint1.hpp" +#include "./ext/vector_uint1_sized.hpp" +#include "./ext/vector_uint2.hpp" +#include "./ext/vector_uint2_sized.hpp" +#include "./ext/vector_uint3.hpp" +#include "./ext/vector_uint3_sized.hpp" +#include "./ext/vector_uint4.hpp" +#include "./ext/vector_uint4_sized.hpp" + +#include "./gtc/bitfield.hpp" +#include "./gtc/color_space.hpp" +#include "./gtc/constants.hpp" +#include "./gtc/epsilon.hpp" +#include "./gtc/integer.hpp" +#include "./gtc/matrix_access.hpp" +#include "./gtc/matrix_integer.hpp" +#include "./gtc/matrix_inverse.hpp" +#include "./gtc/matrix_transform.hpp" +#include "./gtc/noise.hpp" +#include "./gtc/packing.hpp" +#include "./gtc/quaternion.hpp" +#include "./gtc/random.hpp" +#include "./gtc/reciprocal.hpp" +#include 
"./gtc/round.hpp" +#include "./gtc/type_precision.hpp" +#include "./gtc/type_ptr.hpp" +#include "./gtc/ulp.hpp" +#include "./gtc/vec1.hpp" +#if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE +# include "./gtc/type_aligned.hpp" +#endif + +#ifdef GLM_ENABLE_EXPERIMENTAL +#include "./gtx/associated_min_max.hpp" +#include "./gtx/bit.hpp" +#include "./gtx/closest_point.hpp" +#include "./gtx/color_encoding.hpp" +#include "./gtx/color_space.hpp" +#include "./gtx/color_space_YCoCg.hpp" +#include "./gtx/common.hpp" +#include "./gtx/compatibility.hpp" +#include "./gtx/component_wise.hpp" +#include "./gtx/dual_quaternion.hpp" +#include "./gtx/easing.hpp" +#include "./gtx/euler_angles.hpp" +#include "./gtx/extend.hpp" +#include "./gtx/extended_min_max.hpp" +#include "./gtx/fast_exponential.hpp" +#include "./gtx/fast_square_root.hpp" +#include "./gtx/fast_trigonometry.hpp" +#include "./gtx/functions.hpp" +#include "./gtx/gradient_paint.hpp" +#include "./gtx/handed_coordinate_space.hpp" + +#if __cplusplus >= 201103L +#include "./gtx/hash.hpp" +#endif + +#include "./gtx/integer.hpp" +#include "./gtx/intersect.hpp" +#include "./gtx/io.hpp" +#include "./gtx/log_base.hpp" +#include "./gtx/matrix_cross_product.hpp" +#include "./gtx/matrix_decompose.hpp" +#include "./gtx/matrix_factorisation.hpp" +#include "./gtx/matrix_interpolation.hpp" +#include "./gtx/matrix_major_storage.hpp" +#include "./gtx/matrix_operation.hpp" +#include "./gtx/matrix_query.hpp" +#include "./gtx/mixed_product.hpp" +#include "./gtx/norm.hpp" +#include "./gtx/normal.hpp" +#include "./gtx/normalize_dot.hpp" +#include "./gtx/optimum_pow.hpp" +#include "./gtx/orthonormalize.hpp" +#include "./gtx/pca.hpp" +#include "./gtx/perpendicular.hpp" +#include "./gtx/polar_coordinates.hpp" +#include "./gtx/projection.hpp" +#include "./gtx/quaternion.hpp" +#include "./gtx/raw_data.hpp" +#include "./gtx/rotate_normalized_axis.hpp" +#include "./gtx/rotate_vector.hpp" +#include "./gtx/spline.hpp" +#include "./gtx/std_based_type.hpp" +#if !((GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)) +# include "./gtx/string_cast.hpp" +#endif +#include "./gtx/transform.hpp" +#include "./gtx/transform2.hpp" +#include "./gtx/vec_swizzle.hpp" +#include "./gtx/vector_angle.hpp" +#include "./gtx/vector_query.hpp" +#include "./gtx/wrap.hpp" + +#if GLM_HAS_TEMPLATE_ALIASES +# include "./gtx/scalar_multiplication.hpp" +#endif + +#if GLM_HAS_RANGE_FOR +# include "./gtx/range.hpp" +#endif +#endif//GLM_ENABLE_EXPERIMENTAL diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/_matrix_vectorize.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/_matrix_vectorize.hpp new file mode 100644 index 000000000000..0d08117ed1d1 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/_matrix_vectorize.hpp @@ -0,0 +1,128 @@ +#pragma once + +namespace glm { + + namespace detail { + + template class mat, length_t C, length_t R, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<2, 2, T, Q> call(Ret (*Func)(T x), mat<2, 2, T, Q> const &x) { + return mat<2, 2, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), + Func(x[1][0]), Func(x[1][1]) + ); + } + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<2, 3, T, Q> call(Ret (*Func)(T x), mat<2, 3, T, Q> const &x) { + return mat<2, 3, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), 
Func(x[0][2]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<2, 4, T, Q> call(Ret (*Func)(T x), mat<2, 4, T, Q> const &x) { + return mat<2, 4, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), Func(x[0][3]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]), Func(x[1][3]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<3, 2, T, Q> call(Ret (*Func)(T x), mat<3, 2, T, Q> const &x) { + return mat<3, 2, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), + Func(x[1][0]), Func(x[1][1]), + Func(x[2][0]), Func(x[2][1]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<3, 3, T, Q> call(Ret (*Func)(T x), mat<3, 3, T, Q> const &x) { + return mat<3, 3, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]), + Func(x[2][0]), Func(x[2][1]), Func(x[2][2]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<3, 4, T, Q> call(Ret (*Func)(T x), mat<3, 4, T, Q> const &x) { + return mat<3, 4, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), Func(x[0][3]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]), Func(x[1][3]), + Func(x[2][0]), Func(x[2][1]), Func(x[2][2]), Func(x[2][3]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<4, 2, T, Q> call(Ret (*Func)(T x), mat<4, 2, T, Q> const &x) { + return mat<4, 2, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), + Func(x[1][0]), Func(x[1][1]), + Func(x[2][0]), Func(x[2][1]), + Func(x[3][0]), Func(x[3][1]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<4, 3, T, Q> call(Ret (*Func)(T x), mat<4, 3, T, Q> const &x) { + return mat<4, 3, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]), + Func(x[2][0]), Func(x[2][1]), Func(x[2][2]), + Func(x[3][0]), Func(x[3][1]), Func(x[3][2]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<4, 4, T, Q> call(Ret (*Func)(T x), mat<4, 4, T, Q> const &x) { + return mat<4, 4, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), Func(x[0][3]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]), Func(x[1][3]), + Func(x[2][0]), Func(x[2][1]), Func(x[2][2]), Func(x[2][3]), + Func(x[3][0]), Func(x[3][1]), Func(x[3][2]), Func(x[3][3]) + ); + } + + }; + + } + +}// namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_clip_space.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_clip_space.hpp new file mode 100644 index 000000000000..43579b8eee6c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_clip_space.hpp @@ -0,0 +1,522 @@ +/// @ref ext_matrix_clip_space +/// @file glm/ext/matrix_clip_space.hpp +/// +/// @defgroup ext_matrix_clip_space GLM_EXT_matrix_clip_space +/// @ingroup ext +/// +/// Defines functions that generate clip space transformation matrices. 
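+/// A typical usage sketch (parameter values are illustrative only):
+/// @code
+/// // right-handed projection with -1..1 depth range (OpenGL conventions)
+/// glm::mat4 proj = glm::perspectiveRH_NO(glm::radians(45.0f), 16.0f / 9.0f, 0.1f, 100.0f);
+/// @endcode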
+/// +/// The matrices generated by this extension use standard OpenGL fixed-function +/// conventions. For example, the lookAt function generates a transform from world +/// space into the specific eye space that the projective matrix functions +/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility +/// specifications defines the particular layout of this eye space. +/// +/// Include to use the features of this extension. +/// +/// @see ext_matrix_transform +/// @see ext_matrix_projection + +#pragma once + +// Dependencies +#include "../ext/scalar_constants.hpp" +#include "../geometric.hpp" +#include "../trigonometric.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_clip_space extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_clip_space + /// @{ + + /// Creates a matrix for projecting two-dimensional coordinates onto the screen. + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top, T const& zNear, T const& zFar) + /// @see gluOrtho2D man page + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> ortho( + T left, T right, T bottom, T top); + + /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH_ZO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume using left-handed coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH_NO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH_ZO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH_NO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. 
(Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoZO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoNO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates. + /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates. + /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using the default handedness and default near and far clip planes definition. + /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + /// @see glOrtho man page + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> ortho( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a left-handed frustum matrix. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH_ZO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a left-handed frustum matrix. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. 
(OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH_NO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a right-handed frustum matrix. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH_ZO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a right-handed frustum matrix. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH_NO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a frustum matrix using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumZO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a frustum matrix using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumNO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a left-handed frustum matrix. + /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a right-handed frustum matrix. + /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a frustum matrix with default handedness, using the default handedness and default near and far clip planes definition. + /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. + /// + /// @tparam T A floating-point scalar type + /// @see glFrustum man page + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustum( + T left, T right, T bottom, T top, T near, T far); + + + /// Creates a matrix for a right-handed, symmetric perspective-view frustum. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. 
+
+    /// Creates a matrix for a right-handed, symmetric perspective-view frustum.
+    /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH_ZO(
+        T fovy, T aspect, T near, T far);
+
+    /// Creates a matrix for a right-handed, symmetric perspective-view frustum.
+    /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH_NO(
+        T fovy, T aspect, T near, T far);
+
+    /// Creates a matrix for a left-handed, symmetric perspective-view frustum.
+    /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH_ZO(
+        T fovy, T aspect, T near, T far);
+
+    /// Creates a matrix for a left-handed, symmetric perspective-view frustum.
+    /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH_NO(
+        T fovy, T aspect, T near, T far);
+
+    /// Creates a matrix for a symmetric perspective-view frustum using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+    /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveZO(
+        T fovy, T aspect, T near, T far);
+
+    /// Creates a matrix for a symmetric perspective-view frustum using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+    /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveNO(
+        T fovy, T aspect, T near, T far);
+
+    /// Creates a matrix for a right-handed, symmetric perspective-view frustum.
+    /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+    /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH(
+        T fovy, T aspect, T near, T far);
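Note that fovy is an angle in radians for every overload here; degree values have to be converted first. A small sketch, assuming a 45-degree vertical field of view and a 16:9 viewport:

    // glm::radians converts the degree value; passing 45.0f directly would be wrong.
    glm::mat4 proj = glm::perspectiveRH_NO(glm::radians(45.0f), 16.0f / 9.0f, 0.1f, 100.0f);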
+
+    /// Creates a matrix for a left-handed, symmetric perspective-view frustum.
+    /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+    /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH(
+        T fovy, T aspect, T near, T far);
+
+    /// Creates a matrix for a symmetric perspective-view frustum based on the default handedness and default near and far clip planes definition.
+    /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    /// @see gluPerspective man page
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspective(
+        T fovy, T aspect, T near, T far);
+
+    /// Builds a perspective projection matrix based on a field of view using right-handed coordinates.
+    /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+    ///
+    /// @param fov Expressed in radians.
+    /// @param width Width of the viewport
+    /// @param height Height of the viewport
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH_ZO(
+        T fov, T width, T height, T near, T far);
+
+    /// Builds a perspective projection matrix based on a field of view using right-handed coordinates.
+    /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+    ///
+    /// @param fov Expressed in radians.
+    /// @param width Width of the viewport
+    /// @param height Height of the viewport
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH_NO(
+        T fov, T width, T height, T near, T far);
+
+    /// Builds a perspective projection matrix based on a field of view using left-handed coordinates.
+    /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+    ///
+    /// @param fov Expressed in radians.
+    /// @param width Width of the viewport
+    /// @param height Height of the viewport
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH_ZO(
+        T fov, T width, T height, T near, T far);
+
+    /// Builds a perspective projection matrix based on a field of view using left-handed coordinates.
+    /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+    ///
+    /// @param fov Expressed in radians.
+    /// @param width Width of the viewport
+    /// @param height Height of the viewport
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH_NO(
+        T fov, T width, T height, T near, T far);
+
+    /// Builds a perspective projection matrix based on a field of view using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+    /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+    ///
+    /// @param fov Expressed in radians.
+    /// @param width Width of the viewport
+    /// @param height Height of the viewport
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovZO(
+        T fov, T width, T height, T near, T far);
+
+    /// Builds a perspective projection matrix based on a field of view using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+    /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+    ///
+    /// @param fov Expressed in radians.
+    /// @param width Width of the viewport
+    /// @param height Height of the viewport
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovNO(
+        T fov, T width, T height, T near, T far);
+
+    /// Builds a right-handed perspective projection matrix based on a field of view.
+    /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+    /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+    ///
+    /// @param fov Expressed in radians.
+    /// @param width Width of the viewport
+    /// @param height Height of the viewport
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH(
+        T fov, T width, T height, T near, T far);
+
+    /// Builds a left-handed perspective projection matrix based on a field of view.
+    /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+    /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+    ///
+    /// @param fov Expressed in radians.
+    /// @param width Width of the viewport
+    /// @param height Height of the viewport
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH(
+        T fov, T width, T height, T near, T far);
+
+    /// Builds a perspective projection matrix based on a field of view and the default handedness and default near and far clip planes definition.
+    /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
+    ///
+    /// @param fov Expressed in radians.
+    /// @param width Width of the viewport
+    /// @param height Height of the viewport
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFov(
+        T fov, T width, T height, T near, T far);
+
+    /// Creates a matrix for a left-handed, symmetric perspective-view frustum with far plane at infinity.
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspectiveLH(
+        T fovy, T aspect, T near);
+
+    /// Creates a matrix for a right-handed, symmetric perspective-view frustum with far plane at infinity.
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspectiveRH(
+        T fovy, T aspect, T near);
+
+    /// Creates a matrix for a symmetric perspective-view frustum with far plane at infinity with default handedness.
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspective(
+        T fovy, T aspect, T near);
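The infinite variants are the limit of the corresponding perspective matrices as the far plane goes to infinity, which is why the far parameter disappears from their signatures. A usage sketch with assumed values:

    // No far plane to tune; only fovy, aspect ratio, and the near plane remain.
    glm::mat4 proj = glm::infinitePerspective(glm::radians(60.0f), 16.0f / 9.0f, 0.1f);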
+
+    /// Creates a matrix for a symmetric perspective-view frustum with far plane at infinity for graphics hardware that doesn't support depth clamping.
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> tweakedInfinitePerspective(
+        T fovy, T aspect, T near);
+
+    /// Creates a matrix for a symmetric perspective-view frustum with far plane at infinity for graphics hardware that doesn't support depth clamping.
+    ///
+    /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+    /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+    /// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+    /// @param ep Epsilon
+    ///
+    /// @tparam T A floating-point scalar type
+    template<typename T>
+    GLM_FUNC_DECL mat<4, 4, T, defaultp> tweakedInfinitePerspective(
+        T fovy, T aspect, T near, T ep);
+
+    /// @}
+}//namespace glm
+
+#include "matrix_clip_space.inl"
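One usage note before the implementations that follow: the four-argument ortho overload that opens the .inl leaves z untouched apart from a sign flip, which makes it the usual choice for 2D and UI rendering. A sketch assuming a 1280x720 viewport with a top-left origin:

    // y is flipped (top < bottom) so pixel coordinates grow downward, as UI code expects.
    glm::mat4 ui = glm::ortho(0.0f, 1280.0f, 720.0f, 0.0f);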
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_clip_space.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_clip_space.inl
new file mode 100644
index 000000000000..27fb6a13f75d
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_clip_space.inl
@@ -0,0 +1,595 @@
+namespace glm
+{
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> ortho(T left, T right, T bottom, T top)
+    {
+        mat<4, 4, T, defaultp> Result(static_cast<T>(1));
+        Result[0][0] = static_cast<T>(2) / (right - left);
+        Result[1][1] = static_cast<T>(2) / (top - bottom);
+        Result[2][2] = - static_cast<T>(1);
+        Result[3][0] = - (right + left) / (right - left);
+        Result[3][1] = - (top + bottom) / (top - bottom);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH_ZO(T left, T right, T bottom, T top, T zNear, T zFar)
+    {
+        mat<4, 4, T, defaultp> Result(1);
+        Result[0][0] = static_cast<T>(2) / (right - left);
+        Result[1][1] = static_cast<T>(2) / (top - bottom);
+        Result[2][2] = static_cast<T>(1) / (zFar - zNear);
+        Result[3][0] = - (right + left) / (right - left);
+        Result[3][1] = - (top + bottom) / (top - bottom);
+        Result[3][2] = - zNear / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH_NO(T left, T right, T bottom, T top, T zNear, T zFar)
+    {
+        mat<4, 4, T, defaultp> Result(1);
+        Result[0][0] = static_cast<T>(2) / (right - left);
+        Result[1][1] = static_cast<T>(2) / (top - bottom);
+        Result[2][2] = static_cast<T>(2) / (zFar - zNear);
+        Result[3][0] = - (right + left) / (right - left);
+        Result[3][1] = - (top + bottom) / (top - bottom);
+        Result[3][2] = - (zFar + zNear) / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH_ZO(T left, T right, T bottom, T top, T zNear, T zFar)
+    {
+        mat<4, 4, T, defaultp> Result(1);
+        Result[0][0] = static_cast<T>(2) / (right - left);
+        Result[1][1] = static_cast<T>(2) / (top - bottom);
+        Result[2][2] = - static_cast<T>(1) / (zFar - zNear);
+        Result[3][0] = - (right + left) / (right - left);
+        Result[3][1] = - (top + bottom) / (top - bottom);
+        Result[3][2] = - zNear / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH_NO(T left, T right, T bottom, T top, T zNear, T zFar)
+    {
+        mat<4, 4, T, defaultp> Result(1);
+        Result[0][0] = static_cast<T>(2) / (right - left);
+        Result[1][1] = static_cast<T>(2) / (top - bottom);
+        Result[2][2] = - static_cast<T>(2) / (zFar - zNear);
+        Result[3][0] = - (right + left) / (right - left);
+        Result[3][1] = - (top + bottom) / (top - bottom);
+        Result[3][2] = - (zFar + zNear) / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoZO(T left, T right, T bottom, T top, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+            return orthoLH_ZO(left, right, bottom, top, zNear, zFar);
+#       else
+            return orthoRH_ZO(left, right, bottom, top, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoNO(T left, T right, T bottom, T top, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+            return orthoLH_NO(left, right, bottom, top, zNear, zFar);
+#       else
+            return orthoRH_NO(left, right, bottom, top, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH(T left, T right, T bottom, T top, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+            return orthoLH_ZO(left, right, bottom, top, zNear, zFar);
+#       else
+            return orthoLH_NO(left, right, bottom, top, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH(T left, T right, T bottom, T top, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+            return orthoRH_ZO(left, right, bottom, top, zNear, zFar);
+#       else
+            return orthoRH_NO(left, right, bottom, top, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> ortho(T left, T right, T bottom, T top, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
+            return orthoLH_ZO(left, right, bottom, top, zNear, zFar);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
+            return orthoLH_NO(left, right, bottom, top, zNear, zFar);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
+            return orthoRH_ZO(left, right, bottom, top, zNear, zFar);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
+            return orthoRH_NO(left, right, bottom, top, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH_ZO(T left, T right, T bottom, T top, T nearVal, T farVal)
+    {
+        mat<4, 4, T, defaultp> Result(0);
+        Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
+        Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
+        Result[2][0] = -(right + left) / (right - left);
+        Result[2][1] = -(top + bottom) / (top - bottom);
+        Result[2][2] = farVal / (farVal - nearVal);
+        Result[2][3] = static_cast<T>(1);
+        Result[3][2] = -(farVal * nearVal) / (farVal - nearVal);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH_NO(T left, T right, T bottom, T top, T nearVal, T farVal)
+    {
+        mat<4, 4, T, defaultp> Result(0);
+        Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
+        Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
+        Result[2][0] = -(right + left) / (right - left);
+        Result[2][1] = -(top + bottom) / (top - bottom);
+        Result[2][2] = (farVal + nearVal) / (farVal - nearVal);
+        Result[2][3] = static_cast<T>(1);
+        Result[3][2] = - (static_cast<T>(2) * farVal * nearVal) / (farVal - nearVal);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH_ZO(T left, T right, T bottom, T top, T nearVal, T farVal)
+    {
+        mat<4, 4, T, defaultp> Result(0);
+        Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
+        Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
+        Result[2][0] = (right + left) / (right - left);
+        Result[2][1] = (top + bottom) / (top - bottom);
+        Result[2][2] = farVal / (nearVal - farVal);
+        Result[2][3] = static_cast<T>(-1);
+        Result[3][2] = -(farVal * nearVal) / (farVal - nearVal);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH_NO(T left, T right, T bottom, T top, T nearVal, T farVal)
+    {
+        mat<4, 4, T, defaultp> Result(0);
+        Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
+        Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
+        Result[2][0] = (right + left) / (right - left);
+        Result[2][1] = (top + bottom) / (top - bottom);
+        Result[2][2] = - (farVal + nearVal) / (farVal - nearVal);
+        Result[2][3] = static_cast<T>(-1);
+        Result[3][2] = - (static_cast<T>(2) * farVal * nearVal) / (farVal - nearVal);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumZO(T left, T right, T bottom, T top, T nearVal, T farVal)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+            return frustumLH_ZO(left, right, bottom, top, nearVal, farVal);
+#       else
+            return frustumRH_ZO(left, right, bottom, top, nearVal, farVal);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumNO(T left, T right, T bottom, T top, T nearVal, T farVal)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+            return frustumLH_NO(left, right, bottom, top, nearVal, farVal);
+#       else
+            return frustumRH_NO(left, right, bottom, top, nearVal, farVal);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH(T left, T right, T bottom, T top, T nearVal, T farVal)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+            return frustumLH_ZO(left, right, bottom, top, nearVal, farVal);
+#       else
+            return frustumLH_NO(left, right, bottom, top, nearVal, farVal);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH(T left, T right, T bottom, T top, T nearVal, T farVal)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+            return frustumRH_ZO(left, right, bottom, top, nearVal, farVal);
+#       else
+            return frustumRH_NO(left, right, bottom, top, nearVal, farVal);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustum(T left, T right, T bottom, T top, T nearVal, T farVal)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
+            return frustumLH_ZO(left, right, bottom, top, nearVal, farVal);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
+            return frustumLH_NO(left, right, bottom, top, nearVal, farVal);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
+            return frustumRH_ZO(left, right, bottom, top, nearVal, farVal);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
+            return frustumRH_NO(left, right, bottom, top, nearVal, farVal);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH_ZO(T fovy, T aspect, T zNear, T zFar)
+    {
+        assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
+
+        T const tanHalfFovy = tan(fovy / static_cast<T>(2));
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
+        Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
+        Result[2][2] = zFar / (zNear - zFar);
+        Result[2][3] = - static_cast<T>(1);
+        Result[3][2] = -(zFar * zNear) / (zFar - zNear);
+        return Result;
+    }
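All of the suffix-free dispatchers in this file select a variant at compile time through GLM_CONFIG_CLIP_CONTROL, which is derived from the GLM_FORCE_* switches. A sketch of forcing Vulkan-friendly defaults; the define must precede the first GLM include:

    #define GLM_FORCE_DEPTH_ZERO_TO_ONE // make the plain names dispatch to the *_ZO variants
    #include <glm/glm.hpp>
    #include <glm/ext/matrix_clip_space.hpp>

    // ortho/frustum/perspective now resolve to the RH_ZO implementations.
    glm::mat4 proj = glm::perspective(glm::radians(45.0f), 1.5f, 0.1f, 100.0f);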
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH_NO(T fovy, T aspect, T zNear, T zFar)
+    {
+        assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
+
+        T const tanHalfFovy = tan(fovy / static_cast<T>(2));
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
+        Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
+        Result[2][2] = - (zFar + zNear) / (zFar - zNear);
+        Result[2][3] = - static_cast<T>(1);
+        Result[3][2] = - (static_cast<T>(2) * zFar * zNear) / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH_ZO(T fovy, T aspect, T zNear, T zFar)
+    {
+        assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
+
+        T const tanHalfFovy = tan(fovy / static_cast<T>(2));
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
+        Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
+        Result[2][2] = zFar / (zFar - zNear);
+        Result[2][3] = static_cast<T>(1);
+        Result[3][2] = -(zFar * zNear) / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH_NO(T fovy, T aspect, T zNear, T zFar)
+    {
+        assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
+
+        T const tanHalfFovy = tan(fovy / static_cast<T>(2));
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
+        Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
+        Result[2][2] = (zFar + zNear) / (zFar - zNear);
+        Result[2][3] = static_cast<T>(1);
+        Result[3][2] = - (static_cast<T>(2) * zFar * zNear) / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveZO(T fovy, T aspect, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+            return perspectiveLH_ZO(fovy, aspect, zNear, zFar);
+#       else
+            return perspectiveRH_ZO(fovy, aspect, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveNO(T fovy, T aspect, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+            return perspectiveLH_NO(fovy, aspect, zNear, zFar);
+#       else
+            return perspectiveRH_NO(fovy, aspect, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH(T fovy, T aspect, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+            return perspectiveLH_ZO(fovy, aspect, zNear, zFar);
+#       else
+            return perspectiveLH_NO(fovy, aspect, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH(T fovy, T aspect, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+            return perspectiveRH_ZO(fovy, aspect, zNear, zFar);
+#       else
+            return perspectiveRH_NO(fovy, aspect, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspective(T fovy, T aspect, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
+            return perspectiveLH_ZO(fovy, aspect, zNear, zFar);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
+            return perspectiveLH_NO(fovy, aspect, zNear, zFar);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
+            return perspectiveRH_ZO(fovy, aspect, zNear, zFar);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
+            return perspectiveRH_NO(fovy, aspect, zNear, zFar);
+#       endif
+    }
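The two focal terms come from similar triangles: a point at depth z with y = z * tan(fovy / 2) must land on the edge of the clip volume, so Result[1][1] = 1 / tan(fovy / 2), and Result[0][0] additionally divides by the aspect ratio. A quick numerical check, assuming fovy of 90 degrees and a square aspect so both terms collapse to 1:

    #include <cassert>
    #include <cmath>

    // tan(45 deg) == 1, so both focal terms of this matrix are exactly 1.
    glm::mat4 P = glm::perspectiveRH_NO(glm::radians(90.0f), 1.0f, 0.1f, 100.0f);
    assert(std::abs(P[0][0] - 1.0f) < 1e-6f);
    assert(std::abs(P[1][1] - 1.0f) < 1e-6f);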
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH_ZO(T fov, T width, T height, T zNear, T zFar)
+    {
+        assert(width > static_cast<T>(0));
+        assert(height > static_cast<T>(0));
+        assert(fov > static_cast<T>(0));
+
+        T const rad = fov;
+        T const h = glm::cos(static_cast<T>(0.5) * rad) / glm::sin(static_cast<T>(0.5) * rad);
+        T const w = h * height / width; ///todo max(width , Height) / min(width , Height)?
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = w;
+        Result[1][1] = h;
+        Result[2][2] = zFar / (zNear - zFar);
+        Result[2][3] = - static_cast<T>(1);
+        Result[3][2] = -(zFar * zNear) / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH_NO(T fov, T width, T height, T zNear, T zFar)
+    {
+        assert(width > static_cast<T>(0));
+        assert(height > static_cast<T>(0));
+        assert(fov > static_cast<T>(0));
+
+        T const rad = fov;
+        T const h = glm::cos(static_cast<T>(0.5) * rad) / glm::sin(static_cast<T>(0.5) * rad);
+        T const w = h * height / width; ///todo max(width , Height) / min(width , Height)?
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = w;
+        Result[1][1] = h;
+        Result[2][2] = - (zFar + zNear) / (zFar - zNear);
+        Result[2][3] = - static_cast<T>(1);
+        Result[3][2] = - (static_cast<T>(2) * zFar * zNear) / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH_ZO(T fov, T width, T height, T zNear, T zFar)
+    {
+        assert(width > static_cast<T>(0));
+        assert(height > static_cast<T>(0));
+        assert(fov > static_cast<T>(0));
+
+        T const rad = fov;
+        T const h = glm::cos(static_cast<T>(0.5) * rad) / glm::sin(static_cast<T>(0.5) * rad);
+        T const w = h * height / width; ///todo max(width , Height) / min(width , Height)?
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = w;
+        Result[1][1] = h;
+        Result[2][2] = zFar / (zFar - zNear);
+        Result[2][3] = static_cast<T>(1);
+        Result[3][2] = -(zFar * zNear) / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH_NO(T fov, T width, T height, T zNear, T zFar)
+    {
+        assert(width > static_cast<T>(0));
+        assert(height > static_cast<T>(0));
+        assert(fov > static_cast<T>(0));
+
+        T const rad = fov;
+        T const h = glm::cos(static_cast<T>(0.5) * rad) / glm::sin(static_cast<T>(0.5) * rad);
+        T const w = h * height / width; ///todo max(width , Height) / min(width , Height)?
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = w;
+        Result[1][1] = h;
+        Result[2][2] = (zFar + zNear) / (zFar - zNear);
+        Result[2][3] = static_cast<T>(1);
+        Result[3][2] = - (static_cast<T>(2) * zFar * zNear) / (zFar - zNear);
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovZO(T fov, T width, T height, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+            return perspectiveFovLH_ZO(fov, width, height, zNear, zFar);
+#       else
+            return perspectiveFovRH_ZO(fov, width, height, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovNO(T fov, T width, T height, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
+            return perspectiveFovLH_NO(fov, width, height, zNear, zFar);
+#       else
+            return perspectiveFovRH_NO(fov, width, height, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH(T fov, T width, T height, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+            return perspectiveFovLH_ZO(fov, width, height, zNear, zFar);
+#       else
+            return perspectiveFovLH_NO(fov, width, height, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH(T fov, T width, T height, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
+            return perspectiveFovRH_ZO(fov, width, height, zNear, zFar);
+#       else
+            return perspectiveFovRH_NO(fov, width, height, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFov(T fov, T width, T height, T zNear, T zFar)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
+            return perspectiveFovLH_ZO(fov, width, height, zNear, zFar);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
+            return perspectiveFovLH_NO(fov, width, height, zNear, zFar);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
+            return perspectiveFovRH_ZO(fov, width, height, zNear, zFar);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
+            return perspectiveFovRH_NO(fov, width, height, zNear, zFar);
+#       endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveRH_NO(T fovy, T aspect, T zNear)
+    {
+        T const range = tan(fovy / static_cast<T>(2)) * zNear;
+        T const left = -range * aspect;
+        T const right = range * aspect;
+        T const bottom = -range;
+        T const top = range;
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = (static_cast<T>(2) * zNear) / (right - left);
+        Result[1][1] = (static_cast<T>(2) * zNear) / (top - bottom);
+        Result[2][2] = - static_cast<T>(1);
+        Result[2][3] = - static_cast<T>(1);
+        Result[3][2] = - static_cast<T>(2) * zNear;
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveRH_ZO(T fovy, T aspect, T zNear)
+    {
+        T const range = tan(fovy / static_cast<T>(2)) * zNear;
+        T const left = -range * aspect;
+        T const right = range * aspect;
+        T const bottom = -range;
+        T const top = range;
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = (static_cast<T>(2) * zNear) / (right - left);
+        Result[1][1] = (static_cast<T>(2) * zNear) / (top - bottom);
+        Result[2][2] = - static_cast<T>(1);
+        Result[2][3] = - static_cast<T>(1);
+        Result[3][2] = - zNear;
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveLH_NO(T fovy, T aspect, T zNear)
+    {
+        T const range = tan(fovy / static_cast<T>(2)) * zNear;
+        T const left = -range * aspect;
+        T const right = range * aspect;
+        T const bottom = -range;
+        T const top = range;
+
+        mat<4, 4, T, defaultp> Result(T(0));
+        Result[0][0] = (static_cast<T>(2) * zNear) / (right - left);
+        Result[1][1] = (static_cast<T>(2) * zNear) / (top - bottom);
+        Result[2][2] = static_cast<T>(1);
+        Result[2][3] = static_cast<T>(1);
+        Result[3][2] = - static_cast<T>(2) * zNear;
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveLH_ZO(T fovy, T aspect, T zNear)
+    {
+        T const range = tan(fovy / static_cast<T>(2)) * zNear;
+        T const left = -range * aspect;
+        T const right = range * aspect;
+        T const bottom = -range;
+        T const top = range;
+
+        mat<4, 4, T, defaultp> Result(T(0));
+        Result[0][0] = (static_cast<T>(2) * zNear) / (right - left);
+        Result[1][1] = (static_cast<T>(2) * zNear) / (top - bottom);
+        Result[2][2] = static_cast<T>(1);
+        Result[2][3] = static_cast<T>(1);
+        Result[3][2] = - zNear;
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspective(T fovy, T aspect, T zNear)
+    {
+#       if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
+            return infinitePerspectiveLH_ZO(fovy, aspect, zNear);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
+            return infinitePerspectiveLH_NO(fovy, aspect, zNear);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
+            return infinitePerspectiveRH_ZO(fovy, aspect, zNear);
+#       elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
+            return infinitePerspectiveRH_NO(fovy, aspect, zNear);
+#       endif
+    }
+
+    // Infinite projection matrix: http://www.terathon.com/gdc07_lengyel.pdf
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> tweakedInfinitePerspective(T fovy, T aspect, T zNear, T ep)
+    {
+        T const range = tan(fovy / static_cast<T>(2)) * zNear;
+        T const left = -range * aspect;
+        T const right = range * aspect;
+        T const bottom = -range;
+        T const top = range;
+
+        mat<4, 4, T, defaultp> Result(static_cast<T>(0));
+        Result[0][0] = (static_cast<T>(2) * zNear) / (right - left);
+        Result[1][1] = (static_cast<T>(2) * zNear) / (top - bottom);
+        Result[2][2] = ep - static_cast<T>(1);
+        Result[2][3] = static_cast<T>(-1);
+        Result[3][2] = (ep - static_cast<T>(2)) * zNear;
+        return Result;
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> tweakedInfinitePerspective(T fovy, T aspect, T zNear)
+    {
+        return tweakedInfinitePerspective(fovy, aspect, zNear, epsilon<T>());
+    }
+}//namespace glm
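tweakedInfinitePerspective follows the Lengyel construction cited above: it perturbs the limit matrix by a small epsilon so geometry projected exactly onto the far plane at infinity still passes depth clipping on hardware without depth clamping. The three-argument overload plugs in machine epsilon; a caller can widen it, as in this sketch with illustrative values:

    // A hand-picked epsilon, slightly coarser than machine epsilon.
    glm::mat4 proj = glm::tweakedInfinitePerspective(glm::radians(60.0f), 16.0f / 9.0f, 0.1f, 1e-6f);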
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_common.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_common.hpp
new file mode 100644
index 000000000000..6bb3d06e7ece
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_common.hpp
@@ -0,0 +1,39 @@
+/// @ref ext_matrix_common
+/// @file glm/ext/matrix_common.hpp
+///
+/// @defgroup ext_matrix_common GLM_EXT_matrix_common
+/// @ingroup ext
+///
+/// Defines functions for common matrix operations.
+///
+/// Include <glm/ext/matrix_common.hpp> to use the features of this extension.
+///
+/// @see ext_matrix_common
+
+#pragma once
+
+#include "../detail/qualifier.hpp"
+#include "../detail/_fixes.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#   pragma message("GLM: GLM_EXT_matrix_common extension included")
+#endif
+
+namespace glm
+{
+    /// @addtogroup ext_matrix_common
+    /// @{
+
+    template<length_t C, length_t R, typename T, typename U, qualifier Q>
+    GLM_FUNC_DECL mat<C, R, T, Q> mix(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, mat<C, R, U, Q> const& a);
+
+    template<length_t C, length_t R, typename T, typename U, qualifier Q>
+    GLM_FUNC_DECL mat<C, R, T, Q> mix(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, U a);
+
+    template<length_t C, length_t R, typename T, qualifier Q>
+    GLM_FUNC_DECL GLM_CONSTEXPR mat<C, R, T, Q> abs(mat<C, R, T, Q> const& x);
+
+    /// @}
+}//namespace glm
+
+#include "matrix_common.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_common.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_common.inl
new file mode 100644
index 000000000000..1be422202edb
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_common.inl
@@ -0,0 +1,34 @@
+#include "../matrix.hpp"
+
+#include "_matrix_vectorize.hpp"
+
+namespace glm
+{
+    template<length_t C, length_t R, typename T, typename U, qualifier Q>
+    GLM_FUNC_QUALIFIER mat<C, R, T, Q> mix(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, U a)
+    {
+        return mat<C, R, U, Q>(x) * (static_cast<U>(1) - a) + mat<C, R, U, Q>(y) * a;
+    }
+
+    template<length_t C, length_t R, typename T, typename U, qualifier Q>
+    GLM_FUNC_QUALIFIER mat<C, R, T, Q> mix(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, mat<C, R, U, Q> const& a)
+    {
+        return matrixCompMult(mat<C, R, U, Q>(x), static_cast<U>(1) - a) + matrixCompMult(mat<C, R, U, Q>(y), a);
+    }
+
+    template<length_t C, length_t R, typename T, qualifier Q, bool Aligned>
+    struct compute_abs_matrix
+    {
+        GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<C, R, T, Q> call(mat<C, R, T, Q> const& x)
+        {
+            return detail::matrix_functor_1<C, R, T, T, Q>::call(abs, x);
+        }
+    };
+
+    template<length_t C, length_t R, typename T, qualifier Q>
+    GLM_FUNC_DECL GLM_CONSTEXPR mat<C, R, T, Q> abs(mat<C, R, T, Q> const& x)
+    {
+        return compute_abs_matrix<C, R, T, Q, detail::is_aligned<Q>::value>::call(x);
+    }
+
+}//namespace glm
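These overloads extend GLSL's scalar and vector mix to matrices componentwise, which is convenient for cheap transform blending, with the usual caveat that componentwise interpolation of rotation matrices does not stay orthonormal. A sketch assuming two keyframe transforms:

    #include <glm/glm.hpp>
    #include <glm/ext/matrix_common.hpp>
    #include <glm/gtc/matrix_transform.hpp>

    // Componentwise blend; fine for translation/scale, but interpolate rotations as quaternions.
    glm::mat4 a = glm::mat4(1.0f);
    glm::mat4 b = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 2.0f, 0.0f));
    glm::mat4 mid = glm::mix(a, b, 0.5f);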
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x2.hpp
new file mode 100644
index 000000000000..94dca54b59bd
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x2.hpp
@@ -0,0 +1,23 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x2.hpp
+
+#pragma once
+#include "../detail/type_mat2x2.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix
+    /// @{
+
+    /// 2 columns of 2 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<2, 2, double, defaultp> dmat2x2;
+
+    /// 2 columns of 2 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<2, 2, double, defaultp> dmat2;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x2_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x2_precision.hpp
new file mode 100644
index 000000000000..9e2c174e43be
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x2_precision.hpp
@@ -0,0 +1,49 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x2_precision.hpp
+
+#pragma once
+#include "../detail/type_mat2x2.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix_precision
+    /// @{
+
+    /// 2 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 2, double, lowp> lowp_dmat2;
+
+    /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 2, double, mediump> mediump_dmat2;
+
+    /// 2 columns of 2 components matrix of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 2, double, highp> highp_dmat2;
+
+    /// 2 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 2, double, lowp> lowp_dmat2x2;
+
+    /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 2, double, mediump> mediump_dmat2x2;
+
+    /// 2 columns of 2 components matrix of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 2, double, highp> highp_dmat2x2;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x3.hpp
new file mode 100644
index 000000000000..bfef87a666c1
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x3.hpp
+
+#pragma once
+#include "../detail/type_mat2x3.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix
+    /// @{
+
+    /// 2 columns of 3 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<2, 3, double, defaultp> dmat2x3;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x3_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x3_precision.hpp
new file mode 100644
index 000000000000..098fb6046e88
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x3_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x3_precision.hpp
+
+#pragma once
+#include "../detail/type_mat2x3.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix_precision
+    /// @{
+
+    /// 2 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 3, double, lowp> lowp_dmat2x3;
+
+    /// 2 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 3, double, mediump> mediump_dmat2x3;
+
+    /// 2 columns of 3 components matrix of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 3, double, highp> highp_dmat2x3;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x4.hpp
new file mode 100644
index 000000000000..499284bce161
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x4.hpp
+
+#pragma once
+#include "../detail/type_mat2x4.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix
+    /// @{
+
+    /// 2 columns of 4 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<2, 4, double, defaultp> dmat2x4;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x4_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x4_precision.hpp
new file mode 100644
index 000000000000..9b61ebcee1ef
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double2x4_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double2x4_precision.hpp
+
+#pragma once
+#include "../detail/type_mat2x4.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix_precision
+    /// @{
+
+    /// 2 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 4, double, lowp> lowp_dmat2x4;
+
+    /// 2 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 4, double, mediump> mediump_dmat2x4;
+
+    /// 2 columns of 4 components matrix of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<2, 4, double, highp> highp_dmat2x4;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x2.hpp
new file mode 100644
index 000000000000..dd23f36cdbb2
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x2.hpp
+
+#pragma once
+#include "../detail/type_mat3x2.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix
+    /// @{
+
+    /// 3 columns of 2 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<3, 2, double, defaultp> dmat3x2;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x2_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x2_precision.hpp
new file mode 100644
index 000000000000..068d9e911721
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x2_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x2_precision.hpp
+
+#pragma once
+#include "../detail/type_mat3x2.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix_precision
+    /// @{
+
+    /// 3 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 2, double, lowp> lowp_dmat3x2;
+
+    /// 3 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 2, double, mediump> mediump_dmat3x2;
+
+    /// 3 columns of 2 components matrix of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 2, double, highp> highp_dmat3x2;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x3.hpp
new file mode 100644
index 000000000000..53572b735626
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x3.hpp
@@ -0,0 +1,23 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x3.hpp
+
+#pragma once
+#include "../detail/type_mat3x3.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix
+    /// @{
+
+    /// 3 columns of 3 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<3, 3, double, defaultp> dmat3x3;
+
+    /// 3 columns of 3 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<3, 3, double, defaultp> dmat3;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x3_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x3_precision.hpp
new file mode 100644
index 000000000000..8691e7808dc9
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x3_precision.hpp
@@ -0,0 +1,49 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x3_precision.hpp
+
+#pragma once
+#include "../detail/type_mat3x3.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix_precision
+    /// @{
+
+    /// 3 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 3, double, lowp> lowp_dmat3;
+
+    /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 3, double, mediump> mediump_dmat3;
+
+    /// 3 columns of 3 components matrix of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 3, double, highp> highp_dmat3;
+
+    /// 3 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 3, double, lowp> lowp_dmat3x3;
+
+    /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 3, double, mediump> mediump_dmat3x3;
+
+    /// 3 columns of 3 components matrix of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 3, double, highp> highp_dmat3x3;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x4.hpp
new file mode 100644
index 000000000000..c572d637cd2d
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x4.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x4.hpp
+
+#pragma once
+#include "../detail/type_mat3x4.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix
+    /// @{
+
+    /// 3 columns of 4 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<3, 4, double, defaultp> dmat3x4;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x4_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x4_precision.hpp
new file mode 100644
index 000000000000..f040217e748a
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double3x4_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double3x4_precision.hpp
+
+#pragma once
+#include "../detail/type_mat3x4.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix_precision
+    /// @{
+
+    /// 3 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 4, double, lowp> lowp_dmat3x4;
+
+    /// 3 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 4, double, mediump> mediump_dmat3x4;
+
+    /// 3 columns of 4 components matrix of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<3, 4, double, highp> highp_dmat3x4;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x2.hpp
new file mode 100644
index 000000000000..9b229f471e8d
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x2.hpp
+
+#pragma once
+#include "../detail/type_mat4x2.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix
+    /// @{
+
+    /// 4 columns of 2 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<4, 2, double, defaultp> dmat4x2;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x2_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x2_precision.hpp
new file mode 100644
index 000000000000..6ad18ba9e65e
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x2_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x2_precision.hpp
+
+#pragma once
+#include "../detail/type_mat4x2.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix_precision
+    /// @{
+
+    /// 4 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<4, 2, double, lowp> lowp_dmat4x2;
+
+    /// 4 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<4, 2, double, mediump> mediump_dmat4x2;
+
+    /// 4 columns of 2 components matrix of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<4, 2, double, highp> highp_dmat4x2;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x3.hpp
new file mode 100644
index 000000000000..dca4cf956f91
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x3.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x3.hpp
+
+#pragma once
+#include "../detail/type_mat4x3.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix
+    /// @{
+
+    /// 4 columns of 3 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<4, 3, double, defaultp> dmat4x3;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x3_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x3_precision.hpp
new file mode 100644
index 000000000000..f7371de84942
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x3_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x3_precision.hpp
+
+#pragma once
+#include "../detail/type_mat4x3.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix_precision
+    /// @{
+
+    /// 4 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<4, 3, double, lowp> lowp_dmat4x3;
+
+    /// 4 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<4, 3, double, mediump> mediump_dmat4x3;
+
+    /// 4 columns of 3 components matrix of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef mat<4, 3, double, highp> highp_dmat4x3;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x4.hpp
new file mode 100644
index 000000000000..81e1bf65cb5c
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_double4x4.hpp
@@ -0,0 +1,23 @@
+/// @ref core
+/// @file glm/ext/matrix_double4x4.hpp
+
+#pragma once
+#include "../detail/type_mat4x4.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_matrix
+    /// @{
+
+    /// 4 columns of 4 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<4, 4, double, defaultp> dmat4x4;
+
+    /// 4 columns of 4 components matrix of double-precision floating-point numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices
+    typedef mat<4, 4, double, defaultp> dmat4;
+
+    /// @}
+}//namespace glm
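The d-prefixed typedefs mirror the float matrix set at double precision. A common pattern, sketched here with assumed names, is to accumulate transforms in dmat4 and narrow to mat4 only once, at upload time:

    // Accumulate in double precision, then narrow explicitly for the GPU-facing side.
    glm::dmat4 world = glm::dmat4(1.0);
    glm::mat4 gpuWorld = glm::mat4(world);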
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, mediump> mediump_dmat4; + + /// 4 columns of 4 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, highp> highp_dmat4; + + /// 4 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, lowp> lowp_dmat4x4; + + /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, mediump> mediump_dmat4x4; + + /// 4 columns of 4 components matrix of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, highp> highp_dmat4x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x2.hpp new file mode 100644 index 000000000000..53df921fe216 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x2.hpp @@ -0,0 +1,23 @@ +/// @ref core +/// @file glm/ext/matrix_float2x2.hpp + +#pragma once +#include "../detail/type_mat2x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 2 columns of 2 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 2, float, defaultp> mat2x2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 2, float, defaultp> mat2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x2_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x2_precision.hpp new file mode 100644 index 000000000000..898b6db71408 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x2_precision.hpp @@ -0,0 +1,49 @@ +/// @ref core +/// @file glm/ext/matrix_float2x2_precision.hpp + +#pragma once +#include "../detail/type_mat2x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, lowp> lowp_mat2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, mediump> mediump_mat2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, highp> highp_mat2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, lowp> lowp_mat2x2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, mediump> mediump_mat2x2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, highp> highp_mat2x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x3.hpp new file mode 100644 index 000000000000..6f68822dbf1e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float2x3.hpp + +#pragma once +#include "../detail/type_mat2x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 2 columns of 3 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 3, float, defaultp> mat2x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x3_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x3_precision.hpp new file mode 100644 index 000000000000..50c103245c3a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x3_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float2x3_precision.hpp + +#pragma once +#include "../detail/type_mat2x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 2 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 3, float, lowp> lowp_mat2x3; + + /// 2 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 3, float, mediump> mediump_mat2x3; + + /// 2 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 3, float, highp> highp_mat2x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x4.hpp new file mode 100644 index 000000000000..30f30de3cbd4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float2x4.hpp + +#pragma once +#include "../detail/type_mat2x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 2 columns of 4 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 4, float, defaultp> mat2x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x4_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x4_precision.hpp new file mode 100644 index 000000000000..079d63828631 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float2x4_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float2x4_precision.hpp + +#pragma once +#include "../detail/type_mat2x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 2 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 4, float, lowp> lowp_mat2x4; + + /// 2 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 4, float, mediump> mediump_mat2x4; + + /// 2 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 4, float, highp> highp_mat2x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x2.hpp new file mode 100644 index 000000000000..280d0a3e974b --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float3x2.hpp + +#pragma once +#include "../detail/type_mat3x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 3 columns of 2 components matrix of single-precision floating-point numbers. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 2, float, defaultp> mat3x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x2_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x2_precision.hpp new file mode 100644 index 000000000000..8572c2a1b20e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float3x2_precision.hpp + +#pragma once +#include "../detail/type_mat3x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 3 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 2, float, lowp> lowp_mat3x2; + + /// 3 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 2, float, mediump> mediump_mat3x2; + + /// 3 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 2, float, highp> highp_mat3x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x3.hpp new file mode 100644 index 000000000000..177d809ff9f4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x3.hpp @@ -0,0 +1,23 @@ +/// @ref core +/// @file glm/ext/matrix_float3x3.hpp + +#pragma once +#include "../detail/type_mat3x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 3 columns of 3 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 3, float, defaultp> mat3x3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 3, float, defaultp> mat3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x3_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x3_precision.hpp new file mode 100644 index 000000000000..8a900c164200 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x3_precision.hpp @@ -0,0 +1,49 @@ +/// @ref core +/// @file glm/ext/matrix_float3x3_precision.hpp + +#pragma once +#include "../detail/type_mat3x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, lowp> lowp_mat3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, mediump> mediump_mat3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, highp> highp_mat3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, lowp> lowp_mat3x3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, mediump> mediump_mat3x3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, highp> highp_mat3x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x4.hpp new file mode 100644 index 000000000000..64b8459dcdd2 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float3x4.hpp + +#pragma once +#include "../detail/type_mat3x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 3 columns of 4 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 4, float, defaultp> mat3x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x4_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x4_precision.hpp new file mode 100644 index 000000000000..bc36bf13a1e9 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float3x4_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float3x4_precision.hpp + +#pragma once +#include "../detail/type_mat3x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 3 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 4, float, lowp> lowp_mat3x4; + + /// 3 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 4, float, mediump> mediump_mat3x4; + + /// 3 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 4, float, highp> highp_mat3x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x2.hpp new file mode 100644 index 000000000000..1ed5227bf580 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float4x2.hpp + +#pragma once +#include "../detail/type_mat4x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 4 columns of 2 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 2, float, defaultp> mat4x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x2_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x2_precision.hpp new file mode 100644 index 000000000000..88fd069630a8 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float4x2_precision.hpp + +#pragma once +#include "../detail/type_mat4x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 4 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 2, float, lowp> lowp_mat4x2; + + /// 4 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 2, float, mediump> mediump_mat4x2; + + /// 4 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 2, float, highp> highp_mat4x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x3.hpp new file mode 100644 index 000000000000..5dbe7657043f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float4x3.hpp + +#pragma once +#include "../detail/type_mat4x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 4 columns of 3 components matrix of single-precision floating-point numbers.
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 3, float, defaultp> mat4x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x3_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x3_precision.hpp new file mode 100644 index 000000000000..846ed4fc8d9c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x3_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float4x3_precision.hpp + +#pragma once +#include "../detail/type_mat4x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 4 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 3, float, lowp> lowp_mat4x3; + + /// 4 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 3, float, mediump> mediump_mat4x3; + + /// 4 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 3, float, highp> highp_mat4x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x4.hpp new file mode 100644 index 000000000000..5ba111de0481 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x4.hpp @@ -0,0 +1,23 @@ +/// @ref core +/// @file glm/ext/matrix_float4x4.hpp + +#pragma once +#include "../detail/type_mat4x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 4 columns of 4 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 4, float, defaultp> mat4x4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 4, float, defaultp> mat4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x4_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x4_precision.hpp new file mode 100644 index 000000000000..597149bcf90f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_float4x4_precision.hpp @@ -0,0 +1,49 @@ +/// @ref core +/// @file glm/ext/matrix_float4x4_precision.hpp + +#pragma once +#include "../detail/type_mat4x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, lowp> lowp_mat4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs.
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, mediump> mediump_mat4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, highp> highp_mat4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, lowp> lowp_mat4x4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, mediump> mediump_mat4x4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, highp> highp_mat4x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x2.hpp new file mode 100644 index 000000000000..c6aa0686ae78 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x2.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_int2x2 +/// @file glm/ext/matrix_int2x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x2 GLM_EXT_matrix_int2x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x2 + /// @{ + + /// Signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2 + typedef mat<2, 2, int, defaultp> imat2x2; + + /// Signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2 + typedef mat<2, 2, int, defaultp> imat2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x2_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x2_sized.hpp new file mode 100644 index 000000000000..70c0c2106acd --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x2_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_int2x2_sized +/// @file glm/ext/matrix_int2x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x2_sized GLM_EXT_matrix_int2x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. 
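A note for reviewers of these vendored headers, since the typedef dumps above are repetitive by design: every alias is the same glm::mat<C, R, T, Q> template with a different scalar type or qualifier tag, and the unprefixed names (mat2, dmat4, imat2, ...) are the defaultp instantiations. A minimal sketch of what that means in practice, assuming GLM's default (non-aligned) configuration; this snippet is illustrative only and is not part of the patch:

#include <glm/glm.hpp>
#include <type_traits>

// mat2 is nothing more than the defaultp instantiation of the one mat template.
static_assert(std::is_same<glm::mat2, glm::mat<2, 2, float, glm::defaultp>>::value,
        "mat2 == mat<2, 2, float, defaultp>");
// The lowp_/mediump_/highp_ aliases only change the qualifier tag; under the
// default configuration they share one storage layout, so the qualifier is a
// precision guarantee in ULPs, not a different data size.
static_assert(sizeof(glm::lowp_mat2) == sizeof(glm::highp_mat2),
        "same storage under the default configuration");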
+ +#pragma once + +// Dependency: +#include "../mat2x2.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x2_sized + /// @{ + + /// 8 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int8, defaultp> i8mat2x2; + + /// 16 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int16, defaultp> i16mat2x2; + + /// 32 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int32, defaultp> i32mat2x2; + + /// 64 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int64, defaultp> i64mat2x2; + + + /// 8 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int8, defaultp> i8mat2; + + /// 16 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int16, defaultp> i16mat2; + + /// 32 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int32, defaultp> i32mat2; + + /// 64 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int64, defaultp> i64mat2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x3.hpp new file mode 100644 index 000000000000..aee415caa6e1 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x3.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int2x3 +/// @file glm/ext/matrix_int2x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x3 GLM_EXT_matrix_int2x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x3 + /// @{ + + /// Signed integer 2x3 matrix. + /// + /// @see ext_matrix_int2x3 + typedef mat<2, 3, int, defaultp> imat2x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x3_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x3_sized.hpp new file mode 100644 index 000000000000..b5526fe55143 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int2x3_sized +/// @file glm/ext/matrix_int2x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x3_sized GLM_EXT_matrix_int2x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x3.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x3_sized + /// @{ + + /// 8 bit signed integer 2x3 matrix. + /// + /// @see ext_matrix_int2x3_sized + typedef mat<2, 3, int8, defaultp> i8mat2x3; + + /// 16 bit signed integer 2x3 matrix. 
+ /// + /// @see ext_matrix_int2x3_sized + typedef mat<2, 3, int16, defaultp> i16mat2x3; + + /// 32 bit signed integer 2x3 matrix. + /// + /// @see ext_matrix_int2x3_sized + typedef mat<2, 3, int32, defaultp> i32mat2x3; + + /// 64 bit signed integer 2x3 matrix. + /// + /// @see ext_matrix_int2x3_sized + typedef mat<2, 3, int64, defaultp> i64mat2x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x4.hpp new file mode 100644 index 000000000000..4f36331d6602 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x4.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int2x4 +/// @file glm/ext/matrix_int2x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x4 GLM_EXT_matrix_int2x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x4 + /// @{ + + /// Signed integer 2x4 matrix. + /// + /// @see ext_matrix_int2x4 + typedef mat<2, 4, int, defaultp> imat2x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x4_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x4_sized.hpp new file mode 100644 index 000000000000..a66a5e726881 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int2x4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int2x4_sized +/// @file glm/ext/matrix_int2x4_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x4_sized GLM_EXT_matrix_int2x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x4.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x4_sized + /// @{ + + /// 8 bit signed integer 2x4 matrix. + /// + /// @see ext_matrix_int2x4_sized + typedef mat<2, 4, int8, defaultp> i8mat2x4; + + /// 16 bit signed integer 2x4 matrix. + /// + /// @see ext_matrix_int2x4_sized + typedef mat<2, 4, int16, defaultp> i16mat2x4; + + /// 32 bit signed integer 2x4 matrix. + /// + /// @see ext_matrix_int2x4_sized + typedef mat<2, 4, int32, defaultp> i32mat2x4; + + /// 64 bit signed integer 2x4 matrix. + /// + /// @see ext_matrix_int2x4_sized + typedef mat<2, 4, int64, defaultp> i64mat2x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x2.hpp new file mode 100644 index 000000000000..3bd563b7de9c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x2.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int3x2 +/// @file glm/ext/matrix_int3x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x2 GLM_EXT_matrix_int3x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. 
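The *_sized variants above pin the scalar width through glm::int8/int16/int32/int64 (pulled in via scalar_int_sized.hpp), which is what makes them usable for tightly packed vertex or uniform data. A small sanity-check sketch, assuming the default packed (non-aligned) qualifier so no padding is introduced; illustrative only:

#include <glm/ext/matrix_int2x4_sized.hpp>

// A CxR matrix stores C column vectors of R components each, so the sized
// aliases have a predictable footprint under the packed default qualifier.
static_assert(sizeof(glm::i8mat2x4) == 2 * 4 * sizeof(glm::int8),
        "2 columns x 4 rows of int8 = 8 bytes");
static_assert(sizeof(glm::i64mat2x4) == 2 * 4 * sizeof(glm::int64),
        "same shape at 64-bit width = 64 bytes");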
+ +#pragma once + +// Dependency: +#include "../mat3x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x2 + /// @{ + + /// Signed integer 3x2 matrix. + /// + /// @see ext_matrix_int3x2 + typedef mat<3, 2, int, defaultp> imat3x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x2_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x2_sized.hpp new file mode 100644 index 000000000000..7e34c5240f40 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int3x2_sized +/// @file glm/ext/matrix_int3x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x2_sized GLM_EXT_matrix_int3x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x2.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x2_sized + /// @{ + + /// 8 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_int3x2_sized + typedef mat<3, 2, int8, defaultp> i8mat3x2; + + /// 16 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_int3x2_sized + typedef mat<3, 2, int16, defaultp> i16mat3x2; + + /// 32 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_int3x2_sized + typedef mat<3, 2, int32, defaultp> i32mat3x2; + + /// 64 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_int3x2_sized + typedef mat<3, 2, int64, defaultp> i64mat3x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x3.hpp new file mode 100644 index 000000000000..287488da0343 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x3.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_int3x3 +/// @file glm/ext/matrix_int3x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x3 GLM_EXT_matrix_int3x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x3 + /// @{ + + /// Signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3 + typedef mat<3, 3, int, defaultp> imat3x3; + + /// Signed integer 3x3 matrix. 
+ /// + /// @see ext_matrix_int3x3 + typedef mat<3, 3, int, defaultp> imat3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x3_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x3_sized.hpp new file mode 100644 index 000000000000..577e305aa7f7 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x3_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_int3x3_sized +/// @file glm/ext/matrix_int3x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x3_sized GLM_EXT_matrix_int3x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x3.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x3_sized + /// @{ + + /// 8 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int8, defaultp> i8mat3x3; + + /// 16 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int16, defaultp> i16mat3x3; + + /// 32 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int32, defaultp> i32mat3x3; + + /// 64 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int64, defaultp> i64mat3x3; + + + /// 8 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int8, defaultp> i8mat3; + + /// 16 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int16, defaultp> i16mat3; + + /// 32 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int32, defaultp> i32mat3; + + /// 64 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int64, defaultp> i64mat3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x4.hpp new file mode 100644 index 000000000000..08e534d9c4d5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x4.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int3x4 +/// @file glm/ext/matrix_int3x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x4 GLM_EXT_matrix_int3x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x4 + /// @{ + + /// Signed integer 3x4 matrix. 
+ /// + /// @see ext_matrix_int3x4 + typedef mat<3, 4, int, defaultp> imat3x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x4_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x4_sized.hpp new file mode 100644 index 000000000000..692c48c439e7 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int3x4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int3x4_sized +/// @file glm/ext/matrix_int3x4_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x4_sized GLM_EXT_matrix_int3x4_sized +/// @ingroup ext +/// +/// Include <glm/ext/matrix_int3x4_sized.hpp> to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x4.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x4_sized + /// @{ + + /// 8 bit signed integer 3x4 matrix. + /// + /// @see ext_matrix_int3x4_sized + typedef mat<3, 4, int8, defaultp> i8mat3x4; + + /// 16 bit signed integer 3x4 matrix. + /// + /// @see ext_matrix_int3x4_sized + typedef mat<3, 4, int16, defaultp> i16mat3x4; + + /// 32 bit signed integer 3x4 matrix. + /// + /// @see ext_matrix_int3x4_sized + typedef mat<3, 4, int32, defaultp> i32mat3x4; + + /// 64 bit signed integer 3x4 matrix. + /// + /// @see ext_matrix_int3x4_sized + typedef mat<3, 4, int64, defaultp> i64mat3x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x2.hpp new file mode 100644 index 000000000000..f756ef2804c8 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x2.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int4x2 +/// @file glm/ext/matrix_int4x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x2 GLM_EXT_matrix_int4x2 +/// @ingroup ext +/// +/// Include <glm/ext/matrix_int4x2.hpp> to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x2 + /// @{ + + /// Signed integer 4x2 matrix. + /// + /// @see ext_matrix_int4x2 + typedef mat<4, 2, int, defaultp> imat4x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x2_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x2_sized.hpp new file mode 100644 index 000000000000..63a99d604dc5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int4x2_sized +/// @file glm/ext/matrix_int4x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x2_sized GLM_EXT_matrix_int4x2_sized +/// @ingroup ext +/// +/// Include <glm/ext/matrix_int4x2_sized.hpp> to use the features of this extension. +/// +/// Defines a number of matrices with integer types.
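A note on the CxR naming these headers follow: as in GLSL, imat3x4 has 3 columns and 4 rows, so it maps a 3-component vector to a 4-component one. A short shape check using the aliases introduced above (illustrative only, not part of the vendored sources):

#include <glm/glm.hpp>
#include <glm/ext/matrix_int3x4.hpp>

int main() {
    glm::imat3x4 m(1);        // scalar ctor: 1s on the main diagonal, 0 elsewhere
    glm::ivec3 v(1, 2, 3);
    glm::ivec4 r = m * v;     // 3 columns in, 4 components out
    return r == glm::ivec4(1, 2, 3, 0) ? 0 : 1;
}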
+ +#pragma once + +// Dependency: +#include "../mat4x2.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x2_sized + /// @{ + + /// 8 bit signed integer 4x2 matrix. + /// + /// @see ext_matrix_int4x2_sized + typedef mat<4, 2, int8, defaultp> i8mat4x2; + + /// 16 bit signed integer 4x2 matrix. + /// + /// @see ext_matrix_int4x2_sized + typedef mat<4, 2, int16, defaultp> i16mat4x2; + + /// 32 bit signed integer 4x2 matrix. + /// + /// @see ext_matrix_int4x2_sized + typedef mat<4, 2, int32, defaultp> i32mat4x2; + + /// 64 bit signed integer 4x2 matrix. + /// + /// @see ext_matrix_int4x2_sized + typedef mat<4, 2, int64, defaultp> i64mat4x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x3.hpp new file mode 100644 index 000000000000..d5d97a7a37cf --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x3.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int4x3 +/// @file glm/ext/matrix_int4x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x3 GLM_EXT_matrix_int4x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x3 + /// @{ + + /// Signed integer 4x3 matrix. + /// + /// @see ext_matrix_int4x3 + typedef mat<4, 3, int, defaultp> imat4x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x3_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x3_sized.hpp new file mode 100644 index 000000000000..55078fadc60f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int4x3_sized +/// @file glm/ext/matrix_int4x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x3_sized GLM_EXT_matrix_int4x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x3.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x3_sized + /// @{ + + /// 8 bit signed integer 4x3 matrix. + /// + /// @see ext_matrix_int4x3_sized + typedef mat<4, 3, int8, defaultp> i8mat4x3; + + /// 16 bit signed integer 4x3 matrix. + /// + /// @see ext_matrix_int4x3_sized + typedef mat<4, 3, int16, defaultp> i16mat4x3; + + /// 32 bit signed integer 4x3 matrix. + /// + /// @see ext_matrix_int4x3_sized + typedef mat<4, 3, int32, defaultp> i32mat4x3; + + /// 64 bit signed integer 4x3 matrix. 
+ /// + /// @see ext_matrix_int4x3_sized + typedef mat<4, 3, int64, defaultp> i64mat4x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x4.hpp new file mode 100644 index 000000000000..e17cff17f9fb --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x4.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_int4x4 +/// @file glm/ext/matrix_int4x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x4 GLM_EXT_matrix_int4x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x4 + /// @{ + + /// Signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4 + typedef mat<4, 4, int, defaultp> imat4x4; + + /// Signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4 + typedef mat<4, 4, int, defaultp> imat4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x4_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x4_sized.hpp new file mode 100644 index 000000000000..4a11203eb25b --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_int4x4_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_int4x4_sized +/// @file glm/ext/matrix_int4x4_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x4_sized GLM_EXT_matrix_int4x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x4.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x4_sized + /// @{ + + /// 8 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int8, defaultp> i8mat4x4; + + /// 16 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int16, defaultp> i16mat4x4; + + /// 32 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int32, defaultp> i32mat4x4; + + /// 64 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int64, defaultp> i64mat4x4; + + + /// 8 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int8, defaultp> i8mat4; + + /// 16 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int16, defaultp> i16mat4; + + /// 32 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int32, defaultp> i32mat4; + + /// 64 bit signed integer 4x4 matrix. 
+ /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int64, defaultp> i64mat4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_integer.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_integer.hpp new file mode 100644 index 000000000000..7d7dfc5a5b08 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_integer.hpp @@ -0,0 +1,91 @@ +/// @ref ext_matrix_integer +/// @file glm/ext/matrix_integer.hpp +/// +/// @defgroup ext_matrix_integer GLM_EXT_matrix_integer +/// @ingroup ext +/// +/// Defines the GLSL matrix functions matrixCompMult, outerProduct, transpose +/// and determinant for signed integer matrix types. +/// +/// Include <glm/ext/matrix_integer.hpp> to use the features of this extension. +/// +/// @see ext_matrix_projection +/// @see ext_matrix_clip_space + +#pragma once + +// Dependencies +#include "../gtc/constants.hpp" +#include "../geometric.hpp" +#include "../trigonometric.hpp" +#include "../matrix.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_integer extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_integer + /// @{ + + /// Multiply matrix x by matrix y component-wise, i.e., + /// result[i][j] is the scalar product of x[i][j] and y[i][j]. + /// + /// @tparam C Integer between 1 and 4 (inclusive) that qualifies the number of columns + /// @tparam R Integer between 1 and 4 (inclusive) that qualifies the number of rows + /// @tparam T Floating-point or signed integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL matrixCompMult man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template<length_t C, length_t R, typename T, qualifier Q> + GLM_FUNC_DECL mat<C, R, T, Q> matrixCompMult(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y); + + /// Treats the first parameter c as a column vector + /// and the second parameter r as a row vector + /// and does a linear algebraic matrix multiply c * r. + /// + /// @tparam C Integer between 1 and 4 (inclusive) that qualifies the number of columns + /// @tparam R Integer between 1 and 4 (inclusive) that qualifies the number of rows + /// @tparam T Floating-point or signed integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL outerProduct man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template<length_t C, length_t R, typename T, qualifier Q> + GLM_FUNC_DECL typename detail::outerProduct_trait<C, R, T, Q>::type outerProduct(vec<C, T, Q> const& c, vec<R, T, Q> const& r); + + /// Returns the transposed matrix of x + /// + /// @tparam C Integer between 1 and 4 (inclusive) that qualifies the number of columns + /// @tparam R Integer between 1 and 4 (inclusive) that qualifies the number of rows + /// @tparam T Floating-point or signed integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL transpose man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template<length_t C, length_t R, typename T, qualifier Q> + GLM_FUNC_DECL typename mat<C, R, T, Q>::transpose_type transpose(mat<C, R, T, Q> const& x); + + /// Return the determinant of a square matrix.
+ /// + /// @tparam C Integer between 1 and 4 (inclusive) that qualifies the number of columns + /// @tparam R Integer between 1 and 4 (inclusive) that qualifies the number of rows + /// @tparam T Floating-point or signed integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL determinant man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template<length_t C, length_t R, typename T, qualifier Q> + GLM_FUNC_DECL T determinant(mat<C, R, T, Q> const& m); + + /// @} +}//namespace glm + +#include "matrix_integer.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_integer.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_integer.inl new file mode 100644 index 000000000000..8b377ce2a8e6 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_integer.inl @@ -0,0 +1,38 @@ +namespace glm{ +namespace detail +{ + template<length_t C, length_t R, typename T, qualifier Q> + struct compute_matrixCompMult_type<C, R, T, Q, false> { + GLM_FUNC_QUALIFIER static mat<C, R, T, Q> call(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y) + { + return detail::compute_matrixCompMult<C, R, T, Q, detail::is_aligned<Q>::value>::call(x, y); + } + }; + + template<length_t DA, length_t DB, typename T, qualifier Q> + struct compute_outerProduct_type<DA, DB, T, Q, false> { + GLM_FUNC_QUALIFIER static typename detail::outerProduct_trait<DA, DB, T, Q>::type call(vec<DA, T, Q> const& c, vec<DB, T, Q> const& r) + { + return detail::compute_outerProduct<DA, DB, T, Q>::call(c, r); + } + }; + + template<length_t C, length_t R, typename T, qualifier Q> + struct compute_transpose_type<C, R, T, Q, false> + { + GLM_FUNC_QUALIFIER static mat<R, C, T, Q> call(mat<C, R, T, Q> const& m) + { + return detail::compute_transpose<C, R, T, Q, detail::is_aligned<Q>::value>::call(m); + } + }; + + template<length_t C, length_t R, typename T, qualifier Q> + struct compute_determinant_type<C, R, T, Q, false>{ + + GLM_FUNC_QUALIFIER static T call(mat<C, R, T, Q> const& m) + { + return detail::compute_determinant<C, R, T, Q, detail::is_aligned<Q>::value>::call(m); + } + }; +}//namespace detail +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_projection.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_projection.hpp new file mode 100644 index 000000000000..51fd01bd8ee7 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_projection.hpp @@ -0,0 +1,149 @@ +/// @ref ext_matrix_projection +/// @file glm/ext/matrix_projection.hpp +/// +/// @defgroup ext_matrix_projection GLM_EXT_matrix_projection +/// @ingroup ext +/// +/// Functions that map object coordinates into window coordinates and back. +/// +/// The matrices consumed by this extension use standard OpenGL fixed-function +/// conventions. For example, the lookAt function generates a transform from world +/// space into the specific eye space that the projective matrix functions +/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility +/// specification defines the particular layout of this eye space. +/// +/// Include <glm/ext/matrix_projection.hpp> to use the features of this extension. +/// +/// @see ext_matrix_transform +/// @see ext_matrix_clip_space + +#pragma once + +// Dependencies +#include "../gtc/constants.hpp" +#include "../geometric.hpp" +#include "../trigonometric.hpp" +#include "../matrix.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_projection extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_projection + /// @{ + + /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @param obj Specify the object coordinates. + /// @param model Specifies the current modelview matrix + /// @param proj Specifies the current projection matrix + /// @param viewport Specifies the current viewport + /// @return Return the computed window coordinates.
+ /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluProject man page + template<typename T, typename U, qualifier Q> + GLM_FUNC_DECL vec<3, T, Q> projectZO( + vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @param obj Specify the object coordinates. + /// @param model Specifies the current modelview matrix + /// @param proj Specifies the current projection matrix + /// @param viewport Specifies the current viewport + /// @return Return the computed window coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluProject man page + template<typename T, typename U, qualifier Q> + GLM_FUNC_DECL vec<3, T, Q> projectNO( + vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates using default near and far clip planes definition. + /// To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. + /// + /// @param obj Specify the object coordinates. + /// @param model Specifies the current modelview matrix + /// @param proj Specifies the current projection matrix + /// @param viewport Specifies the current viewport + /// @return Return the computed window coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluProject man page + template<typename T, typename U, qualifier Q> + GLM_FUNC_DECL vec<3, T, Q> project( + vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @param win Specify the window coordinates to be mapped. + /// @param model Specifies the modelview matrix + /// @param proj Specifies the projection matrix + /// @param viewport Specifies the viewport + /// @return Returns the computed object coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluUnProject man page + template<typename T, typename U, qualifier Q> + GLM_FUNC_DECL vec<3, T, Q> unProjectZO( + vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @param win Specify the window coordinates to be mapped.
+ /// @param model Specifies the modelview matrix + /// @param proj Specifies the projection matrix + /// @param viewport Specifies the viewport + /// @return Returns the computed object coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluUnProject man page + template + GLM_FUNC_DECL vec<3, T, Q> unProjectNO( + vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates using default near and far clip planes definition. + /// To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. + /// + /// @param win Specify the window coordinates to be mapped. + /// @param model Specifies the modelview matrix + /// @param proj Specifies the projection matrix + /// @param viewport Specifies the viewport + /// @return Returns the computed object coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluUnProject man page + template + GLM_FUNC_DECL vec<3, T, Q> unProject( + vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Define a picking region + /// + /// @param center Specify the center of a picking region in window coordinates. + /// @param delta Specify the width and height, respectively, of the picking region in window coordinates. + /// @param viewport Rendering viewport + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. 
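+	///
+	/// A minimal usage sketch (illustrative; mouseX, mouseY and projection are assumed to be
+	/// provided by the caller, with a lower-left window origin as gluPickMatrix expects):
+	/// @code
+	/// glm::vec4 const viewport(0.0f, 0.0f, 800.0f, 600.0f);
+	/// glm::vec2 const center(mouseX, 600.0f - mouseY); // flip y into window coordinates
+	/// glm::mat4 const pick = glm::pickMatrix(center, glm::vec2(5.0f), viewport); // 5x5 pixel region
+	/// glm::mat4 const pickProj = pick * projection; // premultiply for selection rendering
+	/// @endcode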
+ /// + /// @see gluPickMatrix man page + template + GLM_FUNC_DECL mat<4, 4, T, Q> pickMatrix( + vec<2, T, Q> const& center, vec<2, T, Q> const& delta, vec<4, U, Q> const& viewport); + + /// @} +}//namespace glm + +#include "matrix_projection.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_projection.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_projection.inl new file mode 100644 index 000000000000..2f2c196aac5f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_projection.inl @@ -0,0 +1,106 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> projectZO(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { + vec<4, T, Q> tmp = vec<4, T, Q>(obj, static_cast(1)); + tmp = model * tmp; + tmp = proj * tmp; + + tmp /= tmp.w; + tmp.x = tmp.x * static_cast(0.5) + static_cast(0.5); + tmp.y = tmp.y * static_cast(0.5) + static_cast(0.5); + + tmp[0] = tmp[0] * T(viewport[2]) + T(viewport[0]); + tmp[1] = tmp[1] * T(viewport[3]) + T(viewport[1]); + + return vec<3, T, Q>(tmp); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> projectNO(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { + vec<4, T, Q> tmp = vec<4, T, Q>(obj, static_cast(1)); + tmp = model * tmp; + tmp = proj * tmp; + + tmp /= tmp.w; + tmp = tmp * static_cast(0.5) + static_cast(0.5); + tmp[0] = tmp[0] * T(viewport[2]) + T(viewport[0]); + tmp[1] = tmp[1] * T(viewport[3]) + T(viewport[1]); + + return vec<3, T, Q>(tmp); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> project(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return projectZO(obj, model, proj, viewport); +# else + return projectNO(obj, model, proj, viewport); +# endif + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectZO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { + mat<4, 4, T, Q> Inverse = inverse(proj * model); + + vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1)); + tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]); + tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]); + tmp.x = tmp.x * static_cast(2) - static_cast(1); + tmp.y = tmp.y * static_cast(2) - static_cast(1); + + vec<4, T, Q> obj = Inverse * tmp; + obj /= obj.w; + + return vec<3, T, Q>(obj); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectNO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { + mat<4, 4, T, Q> Inverse = inverse(proj * model); + + vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1)); + tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]); + tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]); + tmp = tmp * static_cast(2) - static_cast(1); + + vec<4, T, Q> obj = Inverse * tmp; + obj /= obj.w; + + return vec<3, T, Q>(obj); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> unProject(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return unProjectZO(win, model, proj, viewport); +# else + return unProjectNO(win, model, proj, viewport); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> pickMatrix(vec<2, T, Q> const& center, vec<2, T, Q> const& delta, vec<4, U, Q> const& viewport) + { + assert(delta.x > 
static_cast(0) && delta.y > static_cast(0)); + mat<4, 4, T, Q> Result(static_cast(1)); + + if(!(delta.x > static_cast(0) && delta.y > static_cast(0))) + return Result; // Error + + vec<3, T, Q> Temp( + (static_cast(viewport[2]) - static_cast(2) * (center.x - static_cast(viewport[0]))) / delta.x, + (static_cast(viewport[3]) - static_cast(2) * (center.y - static_cast(viewport[1]))) / delta.y, + static_cast(0)); + + // Translate and scale the picked region to the entire window + Result = translate(Result, Temp); + return scale(Result, vec<3, T, Q>(static_cast(viewport[2]) / delta.x, static_cast(viewport[3]) / delta.y, static_cast(1))); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_relational.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_relational.hpp new file mode 100644 index 000000000000..20023ad89a0c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_relational.hpp @@ -0,0 +1,132 @@ +/// @ref ext_matrix_relational +/// @file glm/ext/matrix_relational.hpp +/// +/// @defgroup ext_matrix_relational GLM_EXT_matrix_relational +/// @ingroup ext +/// +/// Exposes comparison functions for matrix types that take a user defined epsilon values. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_relational +/// @see ext_scalar_relational +/// @see ext_quaternion_relational + +#pragma once + +// Dependencies +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_relational extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_relational + /// @{ + + /// Perform a component-wise equal-to comparison of two matrices. + /// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y); + + /// Perform a component-wise not-equal-to comparison of two matrices. + /// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, T epsilon); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. 
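+	///
+	/// A minimal usage sketch (illustrative), assuming 4x4 single-precision matrices:
+	/// @code
+	/// glm::mat4 const a(1.0f);
+	/// glm::mat4 const b = a + glm::mat4(1e-7f); // perturb the diagonal slightly
+	/// bool const nearlyEqual = glm::all(glm::equal(a, b, glm::vec4(1e-5f))); // one tolerance per column
+	/// @endcode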
+ /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, vec const& epsilon); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is not satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, T epsilon); + + /// Returns the component-wise comparison of |x - y| >= epsilon. + /// True if this expression is not satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, vec const& epsilon); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, int ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, vec const& ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is not satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, int ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is not satisfied. 
+ /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, vec const& ULPs); + + /// @} +}//namespace glm + +#include "matrix_relational.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_relational.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_relational.inl new file mode 100644 index 000000000000..9cd42b772b32 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_relational.inl @@ -0,0 +1,88 @@ +/// @ref ext_vector_relational +/// @file glm/ext/vector_relational.inl + +// Dependency: +#include "../ext/vector_relational.hpp" +#include "../common.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = all(equal(a[i], b[i])); + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, T Epsilon) + { + return equal(a, b, vec(Epsilon)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, vec const& Epsilon) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = all(equal(a[i], b[i], Epsilon[i])); + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = any(notEqual(a[i], b[i])); + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, T Epsilon) + { + return notEqual(a, b, vec(Epsilon)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, vec const& Epsilon) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = any(notEqual(a[i], b[i], Epsilon[i])); + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, int MaxULPs) + { + return equal(a, b, vec(MaxULPs)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, vec const& MaxULPs) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = all(equal(a[i], b[i], MaxULPs[i])); + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, int MaxULPs) + { + return notEqual(a, b, vec(MaxULPs)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, vec const& MaxULPs) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = any(notEqual(a[i], b[i], MaxULPs[i])); + return Result; + } + +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_transform.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_transform.hpp new file mode 100644 index 000000000000..52695b8bf49d --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_transform.hpp @@ -0,0 +1,171 @@ +/// @ref ext_matrix_transform +/// @file glm/ext/matrix_transform.hpp +/// +/// @defgroup ext_matrix_transform GLM_EXT_matrix_transform +/// @ingroup ext +/// +/// Defines functions that generate common transformation matrices. +/// +/// The matrices generated by this extension use standard OpenGL fixed-function +/// conventions. 
For example, the lookAt function generates a transform from world +/// space into the specific eye space that the projective matrix functions +/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility +/// specifications defines the particular layout of this eye space. +/// +/// Include to use the features of this extension. +/// +/// @see ext_matrix_projection +/// @see ext_matrix_clip_space + +#pragma once + +// Dependencies +#include "../gtc/constants.hpp" +#include "../geometric.hpp" +#include "../trigonometric.hpp" +#include "../matrix.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_transform extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_transform + /// @{ + + /// Builds an identity matrix. + template + GLM_FUNC_DECL GLM_CONSTEXPR genType identity(); + + /// Builds a translation 4 * 4 matrix created from a vector of 3 components. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param v Coordinates of a translation vector. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @code + /// #include + /// #include + /// ... + /// glm::mat4 m = glm::translate(glm::mat4(1.0f), glm::vec3(1.0f)); + /// // m[0][0] == 1.0f, m[0][1] == 0.0f, m[0][2] == 0.0f, m[0][3] == 0.0f + /// // m[1][0] == 0.0f, m[1][1] == 1.0f, m[1][2] == 0.0f, m[1][3] == 0.0f + /// // m[2][0] == 0.0f, m[2][1] == 0.0f, m[2][2] == 1.0f, m[2][3] == 0.0f + /// // m[3][0] == 1.0f, m[3][1] == 1.0f, m[3][2] == 1.0f, m[3][3] == 1.0f + /// @endcode + /// + /// @see - translate(mat<4, 4, T, Q> const& m, T x, T y, T z) + /// @see - translate(vec<3, T, Q> const& v) + /// @see glTranslate man page + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> translate( + mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v); + + /// Builds a rotation 4 * 4 matrix created from an axis vector and an angle. + /// + /// @param m Input matrix multiplied by this rotation matrix. + /// @param angle Rotation angle expressed in radians. + /// @param axis Rotation axis, recommended to be normalized. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - rotate(mat<4, 4, T, Q> const& m, T angle, T x, T y, T z) + /// @see - rotate(T angle, vec<3, T, Q> const& v) + /// @see glRotate man page + template + GLM_FUNC_DECL mat<4, 4, T, Q> rotate( + mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& axis); + + /// Builds a scale 4 * 4 matrix created from 3 scalars. + /// + /// @param m Input matrix multiplied by this scale matrix. + /// @param v Ratio of scaling for each axis. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - scale(mat<4, 4, T, Q> const& m, T x, T y, T z) + /// @see - scale(vec<3, T, Q> const& v) + /// @see glScale man page + template + GLM_FUNC_DECL mat<4, 4, T, Q> scale( + mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v); + + /// Builds a scale 4 * 4 matrix created from point referent 3 shearers. + /// + /// @param m Input matrix multiplied by this shear matrix. + /// @param p Point of shearing as reference. + /// @param l_x Ratio of matrix.x projection in YZ plane relative to the y-axis/z-axis. + /// @param l_y Ratio of matrix.y projection in XZ plane relative to the x-axis/z-axis. + /// @param l_z Ratio of matrix.z projection in XY plane relative to the x-axis/y-axis. 
+ /// + /// as example: + /// [1 , l_xy, l_xz, -(l_xy+l_xz) * p_x] [x] T + /// [x`, y`, z`, w`] = [x`, y`, z`, w`] * [l_yx, 1 , l_yz, -(l_yx+l_yz) * p_y] [y] + /// [l_zx, l_zy, 1 , -(l_zx+l_zy) * p_z] [z] + /// [0 , 0 , 0 , 1 ] [w] + /// + /// @tparam T A floating-point shear type + /// @tparam Q A value from qualifier enum + /// + /// @see - shear(mat<4, 4, T, Q> const& m, T x, T y, T z) + /// @see - shear(vec<3, T, Q> const& p) + /// @see - shear(vec<2, T, Q> const& l_x) + /// @see - shear(vec<2, T, Q> const& l_y) + /// @see - shear(vec<2, T, Q> const& l_z) + /// @see no resource... + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear( + mat<4, 4, T, Q> const &m, vec<3, T, Q> const& p, vec<2, T, Q> const &l_x, vec<2, T, Q> const &l_y, vec<2, T, Q> const &l_z); + + /// Build a right handed look at view matrix. + /// + /// @param eye Position of the camera + /// @param center Position where the camera is looking at + /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1) + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) + template + GLM_FUNC_DECL mat<4, 4, T, Q> lookAtRH( + vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); + + /// Build a left handed look at view matrix. + /// + /// @param eye Position of the camera + /// @param center Position where the camera is looking at + /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1) + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) + template + GLM_FUNC_DECL mat<4, 4, T, Q> lookAtLH( + vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); + + /// Build a look at view matrix based on the default handedness. + /// + /// @param eye Position of the camera + /// @param center Position where the camera is looking at + /// @param up Normalized up vector, how the camera is oriented. 
Typically (0, 0, 1)
+	///
+	/// @tparam T A floating-point scalar type
+	/// @tparam Q A value from qualifier enum
+	///
+	/// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal)
+	/// @see gluLookAt man page
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> lookAt(
+		vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up);
+
+	/// @}
+}//namespace glm
+
+#include "matrix_transform.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_transform.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_transform.inl
new file mode 100644
index 000000000000..40459bbb71bd
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_transform.inl
@@ -0,0 +1,207 @@
+namespace glm
+{
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType identity()
+	{
+		return detail::init_gentype<genType, detail::genTypeTrait<genType>::GENTYPE>::identity();
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> translate(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v)
+	{
+		mat<4, 4, T, Q> Result(m);
+		Result[3] = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3];
+		return Result;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate(mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& v)
+	{
+		T const a = angle;
+		T const c = cos(a);
+		T const s = sin(a);
+
+		vec<3, T, Q> axis(normalize(v));
+		vec<3, T, Q> temp((T(1) - c) * axis);
+
+		mat<4, 4, T, Q> Rotate;
+		Rotate[0][0] = c + temp[0] * axis[0];
+		Rotate[0][1] = temp[0] * axis[1] + s * axis[2];
+		Rotate[0][2] = temp[0] * axis[2] - s * axis[1];
+
+		Rotate[1][0] = temp[1] * axis[0] - s * axis[2];
+		Rotate[1][1] = c + temp[1] * axis[1];
+		Rotate[1][2] = temp[1] * axis[2] + s * axis[0];
+
+		Rotate[2][0] = temp[2] * axis[0] + s * axis[1];
+		Rotate[2][1] = temp[2] * axis[1] - s * axis[0];
+		Rotate[2][2] = c + temp[2] * axis[2];
+
+		mat<4, 4, T, Q> Result;
+		Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2];
+		Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2];
+		Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2];
+		Result[3] = m[3];
+		return Result;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate_slow(mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& v)
+	{
+		T const a = angle;
+		T const c = cos(a);
+		T const s = sin(a);
+		mat<4, 4, T, Q> Result;
+
+		vec<3, T, Q> axis = normalize(v);
+
+		Result[0][0] = c + (static_cast<T>(1) - c) * axis.x * axis.x;
+		Result[0][1] = (static_cast<T>(1) - c) * axis.x * axis.y + s * axis.z;
+		Result[0][2] = (static_cast<T>(1) - c) * axis.x * axis.z - s * axis.y;
+		Result[0][3] = static_cast<T>(0);
+
+		Result[1][0] = (static_cast<T>(1) - c) * axis.y * axis.x - s * axis.z;
+		Result[1][1] = c + (static_cast<T>(1) - c) * axis.y * axis.y;
+		Result[1][2] = (static_cast<T>(1) - c) * axis.y * axis.z + s * axis.x;
+		Result[1][3] = static_cast<T>(0);
+
+		Result[2][0] = (static_cast<T>(1) - c) * axis.z * axis.x + s * axis.y;
+		Result[2][1] = (static_cast<T>(1) - c) * axis.z * axis.y - s * axis.x;
+		Result[2][2] = c + (static_cast<T>(1) - c) * axis.z * axis.z;
+		Result[2][3] = static_cast<T>(0);
+
+		Result[3] = vec<4, T, Q>(0, 0, 0, 1);
+		return m * Result;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v)
+	{
+		mat<4, 4, T, Q> Result;
+		Result[0] = m[0] * v[0];
+		Result[1] = m[1] * v[1];
+		Result[2] = m[2] * v[2];
+		Result[3] = m[3];
+		return Result;
+	}
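+
+	// A minimal composition sketch (illustrative; pos, angle, axis and size are assumed to be
+	// defined by the caller). translate/rotate/scale above each post-multiply their input, so
+	//   glm::mat4 M = glm::scale(glm::rotate(glm::translate(glm::mat4(1.0f), pos), angle, axis), size);
+	// builds M = T * R * S, applying scale first, then rotation, then translation to a column vector.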
+ + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale_slow(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v) + { + mat<4, 4, T, Q> Result(T(1)); + Result[0][0] = v.x; + Result[1][1] = v.y; + Result[2][2] = v.z; + return m * Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear(mat<4, 4, T, Q> const &m, vec<3, T, Q> const& p, vec<2, T, Q> const &l_x, vec<2, T, Q> const &l_y, vec<2, T, Q> const &l_z) + { + T const lambda_xy = l_x[0]; + T const lambda_xz = l_x[1]; + T const lambda_yx = l_y[0]; + T const lambda_yz = l_y[1]; + T const lambda_zx = l_z[0]; + T const lambda_zy = l_z[1]; + + vec<3, T, Q> point_lambda = vec<3, T, Q>( + (lambda_xy + lambda_xz), (lambda_yx + lambda_yz), (lambda_zx + lambda_zy) + ); + + mat<4, 4, T, Q> Shear = mat<4, 4, T, Q>( + 1 , lambda_yx , lambda_zx , 0, + lambda_xy , 1 , lambda_zy , 0, + lambda_xz , lambda_yz , 1 , 0, + -point_lambda[0] * p[0], -point_lambda[1] * p[1], -point_lambda[2] * p[2], 1 + ); + + mat<4, 4, T, Q> Result; + Result[0] = Shear[0] * m[0][0] + Shear[1] * m[0][1] + Shear[2] * m[0][2] + Shear[3] * m[0][3]; + Result[1] = Shear[0] * m[1][0] + Shear[1] * m[1][1] + Shear[2] * m[1][2] + Shear[3] * m[1][3]; + Result[2] = Shear[0] * m[2][0] + Shear[1] * m[2][1] + Shear[2] * m[2][2] + Shear[3] * m[2][3]; + Result[3] = Shear[0] * m[3][0] + Shear[1] * m[3][1] + Shear[2] * m[3][2] + Shear[3] * m[3][3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear_slow(mat<4, 4, T, Q> const &m, vec<3, T, Q> const& p, vec<2, T, Q> const &l_x, vec<2, T, Q> const &l_y, vec<2, T, Q> const &l_z) + { + T const lambda_xy = static_cast(l_x[0]); + T const lambda_xz = static_cast(l_x[1]); + T const lambda_yx = static_cast(l_y[0]); + T const lambda_yz = static_cast(l_y[1]); + T const lambda_zx = static_cast(l_z[0]); + T const lambda_zy = static_cast(l_z[1]); + + vec<3, T, Q> point_lambda = vec<3, T, Q>( + static_cast(lambda_xy + lambda_xz), + static_cast(lambda_yx + lambda_yz), + static_cast(lambda_zx + lambda_zy) + ); + + mat<4, 4, T, Q> Shear = mat<4, 4, T, Q>( + 1 , lambda_yx , lambda_zx , 0, + lambda_xy , 1 , lambda_zy , 0, + lambda_xz , lambda_yz , 1 , 0, + -point_lambda[0] * p[0], -point_lambda[1] * p[1], -point_lambda[2] * p[2], 1 + ); + return m * Shear; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAtRH(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) + { + vec<3, T, Q> const f(normalize(center - eye)); + vec<3, T, Q> const s(normalize(cross(f, up))); + vec<3, T, Q> const u(cross(s, f)); + + mat<4, 4, T, Q> Result(1); + Result[0][0] = s.x; + Result[1][0] = s.y; + Result[2][0] = s.z; + Result[0][1] = u.x; + Result[1][1] = u.y; + Result[2][1] = u.z; + Result[0][2] =-f.x; + Result[1][2] =-f.y; + Result[2][2] =-f.z; + Result[3][0] =-dot(s, eye); + Result[3][1] =-dot(u, eye); + Result[3][2] = dot(f, eye); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAtLH(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) + { + vec<3, T, Q> const f(normalize(center - eye)); + vec<3, T, Q> const s(normalize(cross(up, f))); + vec<3, T, Q> const u(cross(f, s)); + + mat<4, 4, T, Q> Result(1); + Result[0][0] = s.x; + Result[1][0] = s.y; + Result[2][0] = s.z; + Result[0][1] = u.x; + Result[1][1] = u.y; + Result[2][1] = u.z; + Result[0][2] = f.x; + Result[1][2] = f.y; + Result[2][2] = f.z; + Result[3][0] = -dot(s, eye); + Result[3][1] = -dot(u, eye); + Result[3][2] = -dot(f, eye); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAt(vec<3, 
T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) + { +# if (GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT) + return lookAtLH(eye, center, up); +# else + return lookAtRH(eye, center, up); +# endif + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x2.hpp new file mode 100644 index 000000000000..034771ae5225 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x2.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_uint2x2 +/// @file glm/ext/matrix_uint2x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x2 GLM_EXT_matrix_uint2x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x2 + /// @{ + + /// Unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2 + typedef mat<2, 2, uint, defaultp> umat2x2; + + /// Unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2 + typedef mat<2, 2, uint, defaultp> umat2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x2_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x2_sized.hpp new file mode 100644 index 000000000000..4555324d2b52 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x2_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_uint2x2_sized +/// @file glm/ext/matrix_uint2x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x2_sized GLM_EXT_matrix_uint2x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x2.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x2_sized + /// @{ + + /// 8 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint8, defaultp> u8mat2x2; + + /// 16 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint16, defaultp> u16mat2x2; + + /// 32 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint32, defaultp> u32mat2x2; + + /// 64 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint64, defaultp> u64mat2x2; + + + /// 8 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint8, defaultp> u8mat2; + + /// 16 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint16, defaultp> u16mat2; + + /// 32 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint32, defaultp> u32mat2; + + /// 64 bit unsigned integer 2x2 matrix. 
+ /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint64, defaultp> u64mat2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x3.hpp new file mode 100644 index 000000000000..f496c531a0b8 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x3.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint2x3 +/// @file glm/ext/matrix_uint2x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x3 GLM_EXT_matrix_uint2x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x3 + /// @{ + + /// Unsigned integer 2x3 matrix. + /// + /// @see ext_matrix_uint2x3 + typedef mat<2, 3, uint, defaultp> umat2x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x3_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x3_sized.hpp new file mode 100644 index 000000000000..db7939c94658 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint2x3_sized +/// @file glm/ext/matrix_uint2x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x3_sized GLM_EXT_matrix_uint2x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x3.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x3_sized + /// @{ + + /// 8 bit unsigned integer 2x3 matrix. + /// + /// @see ext_matrix_uint2x3_sized + typedef mat<2, 3, uint8, defaultp> u8mat2x3; + + /// 16 bit unsigned integer 2x3 matrix. + /// + /// @see ext_matrix_uint2x3_sized + typedef mat<2, 3, uint16, defaultp> u16mat2x3; + + /// 32 bit unsigned integer 2x3 matrix. + /// + /// @see ext_matrix_uint2x3_sized + typedef mat<2, 3, uint32, defaultp> u32mat2x3; + + /// 64 bit unsigned integer 2x3 matrix. + /// + /// @see ext_matrix_uint2x3_sized + typedef mat<2, 3, uint64, defaultp> u64mat2x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x4.hpp new file mode 100644 index 000000000000..0f993509c23b --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x4.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint2x4 +/// @file glm/ext/matrix_uint2x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x4 GLM_EXT_matrix_int2x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x4 + /// @{ + + /// Unsigned integer 2x4 matrix. 
+ /// + /// @see ext_matrix_uint2x4 + typedef mat<2, 4, uint, defaultp> umat2x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x4_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x4_sized.hpp new file mode 100644 index 000000000000..5c55547ff76a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint2x4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint2x4_sized +/// @file glm/ext/matrix_uint2x4_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x4_sized GLM_EXT_matrix_uint2x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x4.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x4_sized + /// @{ + + /// 8 bit unsigned integer 2x4 matrix. + /// + /// @see ext_matrix_uint2x4_sized + typedef mat<2, 4, uint8, defaultp> u8mat2x4; + + /// 16 bit unsigned integer 2x4 matrix. + /// + /// @see ext_matrix_uint2x4_sized + typedef mat<2, 4, uint16, defaultp> u16mat2x4; + + /// 32 bit unsigned integer 2x4 matrix. + /// + /// @see ext_matrix_uint2x4_sized + typedef mat<2, 4, uint32, defaultp> u32mat2x4; + + /// 64 bit unsigned integer 2x4 matrix. + /// + /// @see ext_matrix_uint2x4_sized + typedef mat<2, 4, uint64, defaultp> u64mat2x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x2.hpp new file mode 100644 index 000000000000..55a9bed688e9 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x2.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint3x2 +/// @file glm/ext/matrix_uint3x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x2 GLM_EXT_matrix_uint3x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x2 + /// @{ + + /// Unsigned integer 3x2 matrix. + /// + /// @see ext_matrix_uint3x2 + typedef mat<3, 2, uint, defaultp> umat3x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x2_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x2_sized.hpp new file mode 100644 index 000000000000..c81af8f968d6 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint3x2_sized +/// @file glm/ext/matrix_uint3x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x2_sized GLM_EXT_matrix_uint3x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. 
+ +#pragma once + +// Dependency: +#include "../mat3x2.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x2_sized + /// @{ + + /// 8 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_uint3x2_sized + typedef mat<3, 2, uint8, defaultp> u8mat3x2; + + /// 16 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_uint3x2_sized + typedef mat<3, 2, uint16, defaultp> u16mat3x2; + + /// 32 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_uint3x2_sized + typedef mat<3, 2, uint32, defaultp> u32mat3x2; + + /// 64 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_uint3x2_sized + typedef mat<3, 2, uint64, defaultp> u64mat3x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x3.hpp new file mode 100644 index 000000000000..1004c0d2d541 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x3.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_uint3x3 +/// @file glm/ext/matrix_uint3x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x3 GLM_EXT_matrix_uint3x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x3 + /// @{ + + /// Unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3 + typedef mat<3, 3, uint, defaultp> umat3x3; + + /// Unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3 + typedef mat<3, 3, uint, defaultp> umat3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x3_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x3_sized.hpp new file mode 100644 index 000000000000..41a8be748660 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x3_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_uint3x3_sized +/// @file glm/ext/matrix_uint3x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x3_sized GLM_EXT_matrix_uint3x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x3.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x3_sized + /// @{ + + /// 8 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint8, defaultp> u8mat3x3; + + /// 16 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint16, defaultp> u16mat3x3; + + /// 32 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint32, defaultp> u32mat3x3; + + /// 64 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint64, defaultp> u64mat3x3; + + + /// 8 bit unsigned integer 3x3 matrix. 
+ /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint8, defaultp> u8mat3; + + /// 16 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint16, defaultp> u16mat3; + + /// 32 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint32, defaultp> u32mat3; + + /// 64 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint64, defaultp> u64mat3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x4.hpp new file mode 100644 index 000000000000..c6dd78c4a053 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x4.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint3x4 +/// @file glm/ext/matrix_uint3x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x4 GLM_EXT_matrix_uint3x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x4 + /// @{ + + /// Signed integer 3x4 matrix. + /// + /// @see ext_matrix_uint3x4 + typedef mat<3, 4, uint, defaultp> umat3x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x4_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x4_sized.hpp new file mode 100644 index 000000000000..2ce28ad816fc --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint3x4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint3x4_sized +/// @file glm/ext/matrix_uint3x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x4_sized GLM_EXT_matrix_uint3x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x4.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x4_sized + /// @{ + + /// 8 bit unsigned integer 3x4 matrix. + /// + /// @see ext_matrix_uint3x4_sized + typedef mat<3, 4, uint8, defaultp> u8mat3x4; + + /// 16 bit unsigned integer 3x4 matrix. + /// + /// @see ext_matrix_uint3x4_sized + typedef mat<3, 4, uint16, defaultp> u16mat3x4; + + /// 32 bit unsigned integer 3x4 matrix. + /// + /// @see ext_matrix_uint3x4_sized + typedef mat<3, 4, uint32, defaultp> u32mat3x4; + + /// 64 bit unsigned integer 3x4 matrix. 
+ /// + /// @see ext_matrix_uint3x4_sized + typedef mat<3, 4, uint64, defaultp> u64mat3x4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x2.hpp new file mode 100644 index 000000000000..0446f5745bde --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x2.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint4x2 +/// @file glm/ext/matrix_uint4x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x2 GLM_EXT_matrix_uint4x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x2 + /// @{ + + /// Unsigned integer 4x2 matrix. + /// + /// @see ext_matrix_uint4x2 + typedef mat<4, 2, uint, defaultp> umat4x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x2_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x2_sized.hpp new file mode 100644 index 000000000000..57a66bf9b8b0 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint4x2_sized +/// @file glm/ext/matrix_uint4x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x2_sized GLM_EXT_matrix_uint4x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x2.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x2_sized + /// @{ + + /// 8 bit unsigned integer 4x2 matrix. + /// + /// @see ext_matrix_uint4x2_sized + typedef mat<4, 2, uint8, defaultp> u8mat4x2; + + /// 16 bit unsigned integer 4x2 matrix. + /// + /// @see ext_matrix_uint4x2_sized + typedef mat<4, 2, uint16, defaultp> u16mat4x2; + + /// 32 bit unsigned integer 4x2 matrix. + /// + /// @see ext_matrix_uint4x2_sized + typedef mat<4, 2, uint32, defaultp> u32mat4x2; + + /// 64 bit unsigned integer 4x2 matrix. + /// + /// @see ext_matrix_uint4x2_sized + typedef mat<4, 2, uint64, defaultp> u64mat4x2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x3.hpp new file mode 100644 index 000000000000..54c24e4e50b1 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x3.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint4x3 +/// @file glm/ext/matrix_uint4x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x3 GLM_EXT_matrix_uint4x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x3 + /// @{ + + /// Unsigned integer 4x3 matrix. 
+ /// + /// @see ext_matrix_uint4x3 + typedef mat<4, 3, uint, defaultp> umat4x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x3_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x3_sized.hpp new file mode 100644 index 000000000000..2e61124d63b4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint4x3_sized +/// @file glm/ext/matrix_uint4x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x3_sized GLM_EXT_matrix_uint4x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x3.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x3_sized + /// @{ + + /// 8 bit unsigned integer 4x3 matrix. + /// + /// @see ext_matrix_uint4x3_sized + typedef mat<4, 3, uint8, defaultp> u8mat4x3; + + /// 16 bit unsigned integer 4x3 matrix. + /// + /// @see ext_matrix_uint4x3_sized + typedef mat<4, 3, uint16, defaultp> u16mat4x3; + + /// 32 bit unsigned integer 4x3 matrix. + /// + /// @see ext_matrix_uint4x3_sized + typedef mat<4, 3, uint32, defaultp> u32mat4x3; + + /// 64 bit unsigned integer 4x3 matrix. + /// + /// @see ext_matrix_uint4x3_sized + typedef mat<4, 3, uint64, defaultp> u64mat4x3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x4.hpp new file mode 100644 index 000000000000..5cc84553d936 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x4.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_uint4x4 +/// @file glm/ext/matrix_uint4x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x4 GLM_EXT_matrix_uint4x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x4 + /// @{ + + /// Unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4 + typedef mat<4, 4, uint, defaultp> umat4x4; + + /// Unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4 + typedef mat<4, 4, uint, defaultp> umat4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x4_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x4_sized.hpp new file mode 100644 index 000000000000..bb10bd2b77d1 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/matrix_uint4x4_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_uint4x4_sized +/// @file glm/ext/matrix_uint4x4_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x4_sized GLM_EXT_matrix_uint4x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. 
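+///
+/// A minimal usage sketch (illustrative): the sized typedefs pin down the component width,
+/// e.g. for matching serialized or GPU-side layouts:
+/// @code
+/// glm::u8mat4 m(1); // 4x4 matrix of 8-bit unsigned integers, diagonal initialized to 1
+/// // four columns of four uint8 components, typically 16 bytes of storage
+/// @endcode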
+ +#pragma once + +// Dependency: +#include "../mat4x4.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x4_sized + /// @{ + + /// 8 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint8, defaultp> u8mat4x4; + + /// 16 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint16, defaultp> u16mat4x4; + + /// 32 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint32, defaultp> u32mat4x4; + + /// 64 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint64, defaultp> u64mat4x4; + + + /// 8 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint8, defaultp> u8mat4; + + /// 16 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint16, defaultp> u16mat4; + + /// 32 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint32, defaultp> u32mat4; + + /// 64 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint64, defaultp> u64mat4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common.hpp new file mode 100644 index 000000000000..f738692a472e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common.hpp @@ -0,0 +1,135 @@ +/// @ref ext_quaternion_common +/// @file glm/ext/quaternion_common.hpp +/// +/// @defgroup ext_quaternion_common GLM_EXT_quaternion_common +/// @ingroup ext +/// +/// Provides common functions for quaternion types +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_common +/// @see ext_vector_common +/// @see ext_quaternion_float +/// @see ext_quaternion_double +/// @see ext_quaternion_exponential +/// @see ext_quaternion_geometric +/// @see ext_quaternion_relational +/// @see ext_quaternion_trigonometric +/// @see ext_quaternion_transform + +#pragma once + +// Dependency: +#include "../ext/scalar_constants.hpp" +#include "../ext/quaternion_geometric.hpp" +#include "../common.hpp" +#include "../trigonometric.hpp" +#include "../exponential.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_common extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_common + /// @{ + + /// Spherical linear interpolation of two quaternions. + /// The interpolation is oriented and the rotation is performed at constant speed. + /// For short path spherical linear interpolation, use the slerp function. + /// + /// @param x A quaternion + /// @param y A quaternion + /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - slerp(qua const& x, qua const& y, T const& a) + template + GLM_FUNC_DECL qua mix(qua const& x, qua const& y, T a); + + /// Linear interpolation of two quaternions. + /// The interpolation is oriented. + /// + /// @param x A quaternion + /// @param y A quaternion + /// @param a Interpolation factor. 
The interpolation is defined in the range [0, 1]. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR qua lerp(qua const& x, qua const& y, T a); + + /// Spherical linear interpolation of two quaternions. + /// The interpolation always take the short path and the rotation is performed at constant speed. + /// + /// @param x A quaternion + /// @param y A quaternion + /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua slerp(qua const& x, qua const& y, T a); + + /// Spherical linear interpolation of two quaternions with multiple spins over rotation axis. + /// The interpolation always take the short path when the spin count is positive and long path + /// when count is negative. Rotation is performed at constant speed. + /// + /// @param x A quaternion + /// @param y A quaternion + /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. + /// @param k Additional spin count. If Value is negative interpolation will be on "long" path. + /// + /// @tparam T A floating-point scalar type + /// @tparam S An integer scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua slerp(qua const& x, qua const& y, T a, S k); + + /// Returns the q conjugate. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR qua conjugate(qua const& q); + + /// Returns the q inverse. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR qua inverse(qua const& q); + + /// Returns true if x holds a NaN (not a number) + /// representation in the underlying implementation's set of + /// floating point representations. Returns false otherwise, + /// including for implementations with no NaN + /// representations. + /// + /// /!\ When using compiler fast math, this function may fail. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> isnan(qua const& x); + + /// Returns true if x holds a positive infinity or negative + /// infinity representation in the underlying implementation's + /// set of floating point representations. Returns false + /// otherwise, including for implementations with no infinity + /// representations. 
+ /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> isinf(qua const& x); + + /// @} +} //namespace glm + +#include "quaternion_common.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common.inl new file mode 100644 index 000000000000..ad171f9d4bdf --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common.inl @@ -0,0 +1,144 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER qua mix(qua const& x, qua const& y, T a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'mix' only accept floating-point inputs"); + + T const cosTheta = dot(x, y); + + // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator + if(cosTheta > static_cast(1) - epsilon()) + { + // Linear interpolation + return qua::wxyz( + mix(x.w, y.w, a), + mix(x.x, y.x, a), + mix(x.y, y.y, a), + mix(x.z, y.z, a)); + } + else + { + // Essential Mathematics, page 467 + T angle = acos(cosTheta); + return (sin((static_cast(1) - a) * angle) * x + sin(a * angle) * y) / sin(angle); + } + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua lerp(qua const& x, qua const& y, T a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'lerp' only accept floating-point inputs"); + + // Lerp is only defined in [0, 1] + assert(a >= static_cast(0)); + assert(a <= static_cast(1)); + + return x * (static_cast(1) - a) + (y * a); + } + + template + GLM_FUNC_QUALIFIER qua slerp(qua const& x, qua const& y, T a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'slerp' only accept floating-point inputs"); + + qua z = y; + + T cosTheta = dot(x, y); + + // If cosTheta < 0, the interpolation will take the long way around the sphere. + // To fix this, one quat must be negated. + if(cosTheta < static_cast(0)) + { + z = -y; + cosTheta = -cosTheta; + } + + // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator + if(cosTheta > static_cast(1) - epsilon()) + { + // Linear interpolation + return qua::wxyz( + mix(x.w, z.w, a), + mix(x.x, z.x, a), + mix(x.y, z.y, a), + mix(x.z, z.z, a)); + } + else + { + // Essential Mathematics, page 467 + T angle = acos(cosTheta); + return (sin((static_cast(1) - a) * angle) * x + sin(a * angle) * z) / sin(angle); + } + } + + template + GLM_FUNC_QUALIFIER qua slerp(qua const& x, qua const& y, T a, S k) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'slerp' only accept floating-point inputs"); + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'slerp' only accept integer for spin count"); + + qua z = y; + + T cosTheta = dot(x, y); + + // If cosTheta < 0, the interpolation will take the long way around the sphere. + // To fix this, one quat must be negated. 
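		// (Aside, not vendored code: q and -q encode the same rotation, so
		// negating one endpoint changes the interpolation path, not the end
		// orientations. E.g. with dot(x, y) = -0.9, flipping y gives
		// dot(x, -y) = 0.9, shrinking the traversed arc from
		// acos(-0.9) ~ 2.69 rad to acos(0.9) ~ 0.45 rad.)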
+ if (cosTheta < static_cast(0)) + { + z = -y; + cosTheta = -cosTheta; + } + + // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator + if (cosTheta > static_cast(1) - epsilon()) + { + // Linear interpolation + return qua::wxyz( + mix(x.w, z.w, a), + mix(x.x, z.x, a), + mix(x.y, z.y, a), + mix(x.z, z.z, a)); + } + else + { + // Graphics Gems III, page 96 + T angle = acos(cosTheta); + T phi = angle + static_cast(k) * glm::pi(); + return (sin(angle - a * phi)* x + sin(a * phi) * z) / sin(angle); + } + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua conjugate(qua const& q) + { + return qua::wxyz(q.w, -q.x, -q.y, -q.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua inverse(qua const& q) + { + return conjugate(q) / dot(q, q); + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> isnan(qua const& q) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isnan' only accept floating-point inputs"); + + return vec<4, bool, Q>(isnan(q.x), isnan(q.y), isnan(q.z), isnan(q.w)); + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> isinf(qua const& q) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isinf' only accept floating-point inputs"); + + return vec<4, bool, Q>(isinf(q.x), isinf(q.y), isinf(q.z), isinf(q.w)); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "quaternion_common_simd.inl" +#endif + diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common_simd.inl new file mode 100644 index 000000000000..ddfc8a44f6a3 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_common_simd.inl @@ -0,0 +1,18 @@ +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm{ +namespace detail +{ + template + struct compute_dot, float, true> + { + static GLM_FUNC_QUALIFIER float call(qua const& x, qua const& y) + { + return _mm_cvtss_f32(glm_vec1_dot(x.data, y.data)); + } + }; +}//namespace detail +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT + diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_double.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_double.hpp new file mode 100644 index 000000000000..63b24de4d52a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_double.hpp @@ -0,0 +1,39 @@ +/// @ref ext_quaternion_double +/// @file glm/ext/quaternion_double.hpp +/// +/// @defgroup ext_quaternion_double GLM_EXT_quaternion_double +/// @ingroup ext +/// +/// Exposes double-precision floating point quaternion type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_quaternion_float +/// @see ext_quaternion_double_precision +/// @see ext_quaternion_common +/// @see ext_quaternion_exponential +/// @see ext_quaternion_geometric +/// @see ext_quaternion_relational +/// @see ext_quaternion_transform +/// @see ext_quaternion_trigonometric + +#pragma once + +// Dependency: +#include "../detail/type_quat.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_double extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_double + /// @{ + + /// Quaternion of double-precision floating-point numbers. 
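// (Aside, not vendored code: conjugate()/inverse() from the implementation
// above, checked on a non-unit quaternion; assumes the GLM headers added by
// this patch are on the include path.)
#include <glm/gtc/quaternion.hpp>

void quaternion_inverse_sketch()
{
	glm::quat q(2.0f, 0.0f, 0.0f, 0.0f);  // w, x, y, z: a non-unit quaternion
	glm::quat c = glm::conjugate(q);      // (2, 0, 0, 0): vector part negated
	glm::quat i = glm::inverse(q);        // conjugate(q) / dot(q, q) = (0.5, 0, 0, 0)
	glm::quat r = q * i;                  // ~identity (1, 0, 0, 0)
}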
+	typedef qua<double, defaultp>		dquat;
+
+	/// @}
+} //namespace glm
+
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_double_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_double_precision.hpp
new file mode 100644
index 000000000000..8aa24a17752d
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_double_precision.hpp
@@ -0,0 +1,42 @@
+/// @ref ext_quaternion_double_precision
+/// @file glm/ext/quaternion_double_precision.hpp
+///
+/// @defgroup ext_quaternion_double_precision GLM_EXT_quaternion_double_precision
+/// @ingroup ext
+///
+/// Exposes double-precision floating point quaternion types with various precision in terms of ULPs.
+///
+/// Include to use the features of this extension.
+
+#pragma once
+
+// Dependency:
+#include "../detail/type_quat.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_EXT_quaternion_double_precision extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup ext_quaternion_double_precision
+	/// @{
+
+	/// Quaternion of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+	///
+	/// @see ext_quaternion_double_precision
+	typedef qua<double, lowp>		lowp_dquat;
+
+	/// Quaternion of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+	///
+	/// @see ext_quaternion_double_precision
+	typedef qua<double, mediump>	mediump_dquat;
+
+	/// Quaternion of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+	///
+	/// @see ext_quaternion_double_precision
+	typedef qua<double, highp>		highp_dquat;
+
+	/// @}
+} //namespace glm
+
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_exponential.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_exponential.hpp
new file mode 100644
index 000000000000..affe2979aad5
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_exponential.hpp
@@ -0,0 +1,63 @@
+/// @ref ext_quaternion_exponential
+/// @file glm/ext/quaternion_exponential.hpp
+///
+/// @defgroup ext_quaternion_exponential GLM_EXT_quaternion_exponential
+/// @ingroup ext
+///
+/// Provides exponential functions for quaternion types.
+///
+/// Include to use the features of this extension.
+///
+/// @see core_exponential
+/// @see ext_quaternion_float
+/// @see ext_quaternion_double
+
+#pragma once
+
+// Dependency:
+#include "../common.hpp"
+#include "../trigonometric.hpp"
+#include "../geometric.hpp"
+#include "../ext/scalar_constants.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_EXT_quaternion_exponential extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup ext_quaternion_exponential
+	/// @{
+
+	/// Returns an exponential of a quaternion.
+	///
+	/// @tparam T A floating-point scalar type
+	/// @tparam Q A value from qualifier enum
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL qua<T, Q> exp(qua<T, Q> const& q);
+
+	/// Returns a logarithm of a quaternion.
+	///
+	/// @tparam T A floating-point scalar type
+	/// @tparam Q A value from qualifier enum
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL qua<T, Q> log(qua<T, Q> const& q);
+
+	/// Returns a quaternion raised to a power.
+ /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua pow(qua const& q, T y); + + /// Returns the square root of a quaternion + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua sqrt(qua const& q); + + /// @} +} //namespace glm + +#include "quaternion_exponential.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_exponential.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_exponential.inl new file mode 100644 index 000000000000..8a9d774b396a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_exponential.inl @@ -0,0 +1,89 @@ +#include "scalar_constants.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER qua exp(qua const& q) + { + vec<3, T, Q> u(q.x, q.y, q.z); + T const Angle = glm::length(u); + if (Angle < epsilon()) + return qua(); + + vec<3, T, Q> const v(u / Angle); + return qua(cos(Angle), sin(Angle) * v); + } + + template + GLM_FUNC_QUALIFIER qua log(qua const& q) + { + vec<3, T, Q> u(q.x, q.y, q.z); + T Vec3Len = length(u); + + if (Vec3Len < epsilon()) + { + if(q.w > static_cast(0)) + return qua::wxyz(log(q.w), static_cast(0), static_cast(0), static_cast(0)); + else if(q.w < static_cast(0)) + return qua::wxyz(log(-q.w), pi(), static_cast(0), static_cast(0)); + else + return qua::wxyz(std::numeric_limits::infinity(), std::numeric_limits::infinity(), std::numeric_limits::infinity(), std::numeric_limits::infinity()); + } + else + { + T t = atan(Vec3Len, T(q.w)) / Vec3Len; + T QuatLen2 = Vec3Len * Vec3Len + q.w * q.w; + return qua::wxyz(static_cast(0.5) * log(QuatLen2), t * q.x, t * q.y, t * q.z); + } + } + + template + GLM_FUNC_QUALIFIER qua pow(qua const& x, T y) + { + //Raising to the power of 0 should yield 1 + //Needed to prevent a division by 0 error later on + if(y > -epsilon() && y < epsilon()) + return qua::wxyz(1,0,0,0); + + //To deal with non-unit quaternions + T magnitude = sqrt(x.x * x.x + x.y * x.y + x.z * x.z + x.w *x.w); + + T Angle; + if(abs(x.w / magnitude) > cos_one_over_two()) + { + //Scalar component is close to 1; using it to recover angle would lose precision + //Instead, we use the non-scalar components since sin() is accurate around 0 + + //Prevent a division by 0 error later on + T VectorMagnitude = x.x * x.x + x.y * x.y + x.z * x.z; + //Despite the compiler might say, we actually want to compare + //VectorMagnitude to 0. here; we could use denorm_int() compiling a + //project with unsafe maths optimizations might make the comparison + //always false, even when VectorMagnitude is 0. 
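			//(Aside, not vendored code: a concrete instance of the branch below.
			//For pow(q, 3) with q = (w=2, x=0, y=0, z=0), VectorMagnitude is
			//exactly 0, so the general formula would divide by sin(Angle) = 0;
			//the early return instead yields (pow(2, 3), 0, 0, 0) = (8, 0, 0, 0).)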
+ if (VectorMagnitude < std::numeric_limits::min()) { + //Equivalent to raising a real number to a power + return qua::wxyz(pow(x.w, y), 0, 0, 0); + } + + Angle = asin(sqrt(VectorMagnitude) / magnitude); + } + else + { + //Scalar component is small, shouldn't cause loss of precision + Angle = acos(x.w / magnitude); + } + + T NewAngle = Angle * y; + T Div = sin(NewAngle) / sin(Angle); + T Mag = pow(magnitude, y - static_cast(1)); + return qua::wxyz(cos(NewAngle) * magnitude * Mag, x.x * Div * Mag, x.y * Div * Mag, x.z * Div * Mag); + } + + template + GLM_FUNC_QUALIFIER qua sqrt(qua const& x) + { + return pow(x, static_cast(0.5)); + } +}//namespace glm + + diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_float.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_float.hpp new file mode 100644 index 000000000000..ca42a60597f4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_float.hpp @@ -0,0 +1,39 @@ +/// @ref ext_quaternion_float +/// @file glm/ext/quaternion_float.hpp +/// +/// @defgroup ext_quaternion_float GLM_EXT_quaternion_float +/// @ingroup ext +/// +/// Exposes single-precision floating point quaternion type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_quaternion_double +/// @see ext_quaternion_float_precision +/// @see ext_quaternion_common +/// @see ext_quaternion_exponential +/// @see ext_quaternion_geometric +/// @see ext_quaternion_relational +/// @see ext_quaternion_transform +/// @see ext_quaternion_trigonometric + +#pragma once + +// Dependency: +#include "../detail/type_quat.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_float extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_float + /// @{ + + /// Quaternion of single-precision floating-point numbers. + typedef qua quat; + + /// @} +} //namespace glm + diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_float_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_float_precision.hpp new file mode 100644 index 000000000000..f9e4f5c21d90 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_float_precision.hpp @@ -0,0 +1,36 @@ +/// @ref ext_quaternion_float_precision +/// @file glm/ext/quaternion_float_precision.hpp +/// +/// @defgroup ext_quaternion_float_precision GLM_EXT_quaternion_float_precision +/// @ingroup ext +/// +/// Exposes single-precision floating point quaternion type with various precision in term of ULPs. +/// +/// Include to use the features of this extension. + +#pragma once + +// Dependency: +#include "../detail/type_quat.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_float_precision extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_float_precision + /// @{ + + /// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef qua lowp_quat; + + /// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef qua mediump_quat; + + /// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+	typedef qua<float, highp>		highp_quat;
+
+	/// @}
+} //namespace glm
+
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_geometric.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_geometric.hpp
new file mode 100644
index 000000000000..6a2403fd2818
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_geometric.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_quaternion_geometric
+/// @file glm/ext/quaternion_geometric.hpp
+///
+/// @defgroup ext_quaternion_geometric GLM_EXT_quaternion_geometric
+/// @ingroup ext
+///
+/// Provides geometric functions for quaternion types.
+///
+/// Include to use the features of this extension.
+///
+/// @see core_func_geometric
+/// @see ext_quaternion_float
+/// @see ext_quaternion_double
+
+#pragma once
+
+// Dependency:
+#include "../geometric.hpp"
+#include "../exponential.hpp"
+#include "../ext/vector_relational.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_EXT_quaternion_geometric extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup ext_quaternion_geometric
+	/// @{
+
+	/// Returns the norm of a quaternion.
+	///
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see ext_quaternion_geometric
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL T length(qua<T, Q> const& q);
+
+	/// Returns the normalized quaternion.
+	///
+	/// @tparam T Floating-point scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see ext_quaternion_geometric
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL qua<T, Q> normalize(qua<T, Q> const& q);
+
+	/// Returns dot product of q1 and q2, i.e., q1[0] * q2[0] + q1[1] * q2[1] + ...
+	///
+	/// @tparam T Floating-point scalar types.
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see ext_quaternion_geometric
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR T dot(qua<T, Q> const& x, qua<T, Q> const& y);
+
+	/// Compute a cross product.
+ /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_geometric + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua cross(qua const& q1, qua const& q2); + + /// @} +} //namespace glm + +#include "quaternion_geometric.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_geometric.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_geometric.inl new file mode 100644 index 000000000000..88dc4d63de18 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_geometric.inl @@ -0,0 +1,36 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T dot(qua const& x, qua const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'dot' accepts only floating-point inputs"); + return detail::compute_dot, T, detail::is_aligned::value>::call(x, y); + } + + template + GLM_FUNC_QUALIFIER T length(qua const& q) + { + return glm::sqrt(dot(q, q)); + } + + template + GLM_FUNC_QUALIFIER qua normalize(qua const& q) + { + T len = length(q); + if(len <= static_cast(0)) // Problem + return qua::wxyz(static_cast(1), static_cast(0), static_cast(0), static_cast(0)); + T oneOverLen = static_cast(1) / len; + return qua::wxyz(q.w * oneOverLen, q.x * oneOverLen, q.y * oneOverLen, q.z * oneOverLen); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua cross(qua const& q1, qua const& q2) + { + return qua::wxyz( + q1.w * q2.w - q1.x * q2.x - q1.y * q2.y - q1.z * q2.z, + q1.w * q2.x + q1.x * q2.w + q1.y * q2.z - q1.z * q2.y, + q1.w * q2.y + q1.y * q2.w + q1.z * q2.x - q1.x * q2.z, + q1.w * q2.z + q1.z * q2.w + q1.x * q2.y - q1.y * q2.x); + } +}//namespace glm + diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_relational.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_relational.hpp new file mode 100644 index 000000000000..7aa121da0a15 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_relational.hpp @@ -0,0 +1,62 @@ +/// @ref ext_quaternion_relational +/// @file glm/ext/quaternion_relational.hpp +/// +/// @defgroup ext_quaternion_relational GLM_EXT_quaternion_relational +/// @ingroup ext +/// +/// Exposes comparison functions for quaternion types that take a user defined epsilon values. +/// +/// Include to use the features of this extension. +/// +/// @see core_vector_relational +/// @see ext_vector_relational +/// @see ext_matrix_relational +/// @see ext_quaternion_float +/// @see ext_quaternion_double + +#pragma once + +// Dependency: +#include "../vector_relational.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_relational extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_relational + /// @{ + + /// Returns the component-wise comparison of result x == y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> equal(qua const& x, qua const& y); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> equal(qua const& x, qua const& y, T epsilon); + + /// Returns the component-wise comparison of result x != y. 
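// (Aside, not vendored code: the geometric helpers implemented above behave
// like their vector counterparts, and cross() on quaternions is the Hamilton
// product. Sketch, assuming GLM is on the include path.)
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/ext/quaternion_geometric.hpp>

void quaternion_geometric_sketch()
{
	glm::quat q1 = glm::angleAxis(glm::radians(30.0f), glm::vec3(1.0f, 0.0f, 0.0f));
	glm::quat q2 = glm::angleAxis(glm::radians(45.0f), glm::vec3(0.0f, 1.0f, 0.0f));

	float d = glm::dot(q1, q2);               // cos of half the relative rotation angle
	float n = glm::length(q1);                // ~1 for a unit quaternion
	glm::quat u = glm::normalize(q1 * 2.0f);  // rescales back to unit length
	glm::quat h = glm::cross(q1, q2);         // same components as q1 * q2
}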
+ /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> notEqual(qua const& x, qua const& y); + + /// Returns the component-wise comparison of |x - y| >= epsilon. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> notEqual(qua const& x, qua const& y, T epsilon); + + /// @} +} //namespace glm + +#include "quaternion_relational.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_relational.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_relational.inl new file mode 100644 index 000000000000..b1713e95c6c5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_relational.inl @@ -0,0 +1,35 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> equal(qua const& x, qua const& y) + { + vec<4, bool, Q> Result; + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] == y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> equal(qua const& x, qua const& y, T epsilon) + { + vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); + return lessThan(abs(v), vec<4, T, Q>(epsilon)); + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> notEqual(qua const& x, qua const& y) + { + vec<4, bool, Q> Result; + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] != y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> notEqual(qua const& x, qua const& y, T epsilon) + { + vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); + return greaterThanEqual(abs(v), vec<4, T, Q>(epsilon)); + } +}//namespace glm + diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_transform.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_transform.hpp new file mode 100644 index 000000000000..a9cc5c2b59ff --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_transform.hpp @@ -0,0 +1,47 @@ +/// @ref ext_quaternion_transform +/// @file glm/ext/quaternion_transform.hpp +/// +/// @defgroup ext_quaternion_transform GLM_EXT_quaternion_transform +/// @ingroup ext +/// +/// Provides transformation functions for quaternion types +/// +/// Include to use the features of this extension. +/// +/// @see ext_quaternion_float +/// @see ext_quaternion_double +/// @see ext_quaternion_exponential +/// @see ext_quaternion_geometric +/// @see ext_quaternion_relational +/// @see ext_quaternion_trigonometric + +#pragma once + +// Dependency: +#include "../common.hpp" +#include "../trigonometric.hpp" +#include "../geometric.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_transform extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_transform + /// @{ + + /// Rotates a quaternion from a vector of 3 components axis and an angle. + /// + /// @param q Source orientation + /// @param angle Angle expressed in radians. 
+ /// @param axis Axis of the rotation + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL qua rotate(qua const& q, T const& angle, vec<3, T, Q> const& axis); + /// @} +} //namespace glm + +#include "quaternion_transform.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_transform.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_transform.inl new file mode 100644 index 000000000000..7e773fbd264d --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_transform.inl @@ -0,0 +1,24 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER qua rotate(qua const& q, T const& angle, vec<3, T, Q> const& v) + { + vec<3, T, Q> Tmp = v; + + // Axis of rotation must be normalised + T len = glm::length(Tmp); + if(abs(len - static_cast(1)) > static_cast(0.001)) + { + T oneOverLen = static_cast(1) / len; + Tmp.x *= oneOverLen; + Tmp.y *= oneOverLen; + Tmp.z *= oneOverLen; + } + + T const AngleRad(angle); + T const Sin = sin(AngleRad * static_cast(0.5)); + + return q * qua::wxyz(cos(AngleRad * static_cast(0.5)), Tmp.x * Sin, Tmp.y * Sin, Tmp.z * Sin); + } +}//namespace glm + diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_trigonometric.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_trigonometric.hpp new file mode 100644 index 000000000000..574a70479576 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_trigonometric.hpp @@ -0,0 +1,65 @@ +/// @ref ext_quaternion_trigonometric +/// @file glm/ext/quaternion_trigonometric.hpp +/// +/// @defgroup ext_quaternion_trigonometric GLM_EXT_quaternion_trigonometric +/// @ingroup ext +/// +/// Provides trigonometric functions for quaternion types +/// +/// Include to use the features of this extension. +/// +/// @see ext_quaternion_float +/// @see ext_quaternion_double +/// @see ext_quaternion_exponential +/// @see ext_quaternion_geometric +/// @see ext_quaternion_relational +/// @see ext_quaternion_transform + +#pragma once + +// Dependency: +#include "../trigonometric.hpp" +#include "../exponential.hpp" +#include "scalar_constants.hpp" +#include "vector_relational.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_trigonometric extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_trigonometric + /// @{ + + /// Returns the quaternion rotation angle. + /// + /// @param x A normalized quaternion. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL T angle(qua const& x); + + /// Returns the q rotation axis. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL vec<3, T, Q> axis(qua const& x); + + /// Build a quaternion from an angle and a normalized axis. + /// + /// @param angle Angle expressed in radians. + /// @param axis Axis of the quaternion, must be normalized. 
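// (Aside, not vendored code: angle()/axis() recover what angleAxis() builds,
// and rotate() composes an extra rotation about a normalized axis. Sketch,
// assuming GLM is on the include path.)
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

void angle_axis_sketch()
{
	glm::vec3 axis(0.0f, 0.0f, 1.0f);  // already normalized, as required
	glm::quat q = glm::angleAxis(glm::radians(60.0f), axis);

	float     a = glm::angle(q);  // ~radians(60)
	glm::vec3 v = glm::axis(q);   // ~(0, 0, 1)

	glm::quat q2 = glm::rotate(q, glm::radians(30.0f), axis);  // ~90 degrees total
}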
+ /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua angleAxis(T const& angle, vec<3, T, Q> const& axis); + + /// @} +} //namespace glm + +#include "quaternion_trigonometric.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_trigonometric.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_trigonometric.inl new file mode 100644 index 000000000000..896449aa6e1f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/quaternion_trigonometric.inl @@ -0,0 +1,37 @@ +#include "scalar_constants.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T angle(qua const& x) + { + if (abs(x.w) > cos_one_over_two()) + { + T const a = asin(sqrt(x.x * x.x + x.y * x.y + x.z * x.z)) * static_cast(2); + if(x.w < static_cast(0)) + return pi() * static_cast(2) - a; + return a; + } + + return acos(x.w) * static_cast(2); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> axis(qua const& x) + { + T const tmp1 = static_cast(1) - x.w * x.w; + if(tmp1 <= static_cast(0)) + return vec<3, T, Q>(0, 0, 1); + T const tmp2 = static_cast(1) / sqrt(tmp1); + return vec<3, T, Q>(x.x * tmp2, x.y * tmp2, x.z * tmp2); + } + + template + GLM_FUNC_QUALIFIER qua angleAxis(T const& angle, vec<3, T, Q> const& v) + { + T const a(angle); + T const s = glm::sin(a * static_cast(0.5)); + + return qua(glm::cos(a * static_cast(0.5)), v * s); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_common.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_common.hpp new file mode 100644 index 000000000000..df04b6b809a9 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_common.hpp @@ -0,0 +1,181 @@ +/// @ref ext_scalar_common +/// @file glm/ext/scalar_common.hpp +/// +/// @defgroup ext_scalar_common GLM_EXT_scalar_common +/// @ingroup ext +/// +/// Exposes min and max functions for 3 to 4 scalar parameters. +/// +/// Include to use the features of this extension. +/// +/// @see core_func_common +/// @see ext_vector_common + +#pragma once + +// Dependency: +#include "../common.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_common extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_scalar_common + /// @{ + + /// Returns the minimum component-wise values of 3 inputs + /// + /// @tparam T A floating-point scalar type. + /// + /// @see ext_scalar_common + template + GLM_FUNC_DECL T min(T a, T b, T c); + + /// Returns the minimum component-wise values of 4 inputs + /// + /// @tparam T A floating-point scalar type. + /// + /// @see ext_scalar_common + template + GLM_FUNC_DECL T min(T a, T b, T c, T d); + + /// Returns the maximum component-wise values of 3 inputs + /// + /// @tparam T A floating-point scalar type. + /// + /// @see ext_scalar_common + template + GLM_FUNC_DECL T max(T a, T b, T c); + + /// Returns the maximum component-wise values of 4 inputs + /// + /// @tparam T A floating-point scalar type. + /// + /// @see ext_scalar_common + template + GLM_FUNC_DECL T max(T a, T b, T c, T d); + + /// Returns the minimum component-wise values of 2 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmin documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmin(T a, T b); + + /// Returns the minimum component-wise values of 3 inputs. 
If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmin documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmin(T a, T b, T c); + + /// Returns the minimum component-wise values of 4 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmin documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmin(T a, T b, T c, T d); + + /// Returns the maximum component-wise values of 2 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmax documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmax(T a, T b); + + /// Returns the maximum component-wise values of 3 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmax documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmax(T a, T b, T C); + + /// Returns the maximum component-wise values of 4 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmax documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmax(T a, T b, T C, T D); + + /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam genType Floating-point scalar types. + /// + /// @see ext_scalar_common + template + GLM_FUNC_DECL genType fclamp(genType x, genType minVal, genType maxVal); + + /// Simulate GL_CLAMP OpenGL wrap mode + /// + /// @tparam genType Floating-point scalar types. + /// + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL genType clamp(genType const& Texcoord); + + /// Simulate GL_REPEAT OpenGL wrap mode + /// + /// @tparam genType Floating-point scalar types. + /// + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL genType repeat(genType const& Texcoord); + + /// Simulate GL_MIRRORED_REPEAT OpenGL wrap mode + /// + /// @tparam genType Floating-point scalar types. + /// + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL genType mirrorClamp(genType const& Texcoord); + + /// Simulate GL_MIRROR_REPEAT OpenGL wrap mode + /// + /// @tparam genType Floating-point scalar types. + /// + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL genType mirrorRepeat(genType const& Texcoord); + + /// Returns a value equal to the nearest integer to x. + /// The fraction 0.5 will round in a direction chosen by the + /// implementation, presumably the direction that is fastest. + /// + /// @param x The values of the argument must be greater or equal to zero. + /// @tparam genType floating point scalar types. + /// + /// @see GLSL round man page + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL int iround(genType const& x); + + /// Returns a value equal to the nearest integer to x. + /// The fraction 0.5 will round in a direction chosen by the + /// implementation, presumably the direction that is fastest. + /// + /// @param x The values of the argument must be greater or equal to zero. + /// @tparam genType floating point scalar types. 
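// (Aside, not vendored code: unlike min()/max(), the fmin()/fmax() family
// declared above follows std::fmin/std::fmax semantics and skips NaN
// arguments. Sketch, assuming GLM is on the include path.)
#include <limits>
#include <glm/ext/scalar_common.hpp>

void scalar_common_sketch()
{
	float nan = std::numeric_limits<float>::quiet_NaN();

	float a = glm::fmin(nan, 2.0f);          // 2.0f: the NaN is skipped
	float b = glm::fmax(1.0f, nan, 3.0f);    // 3.0f
	float c = glm::fclamp(nan, 0.0f, 1.0f);  // 0.0f: fmax(nan, 0) -> 0, then fmin with 1
	float r = glm::repeat(1.25f);            // 0.25f: GL_REPEAT-style wrap
}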
+ /// + /// @see GLSL round man page + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL uint uround(genType const& x); + + /// @} +}//namespace glm + +#include "scalar_common.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_common.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_common.inl new file mode 100644 index 000000000000..3d09fef0cb4f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_common.inl @@ -0,0 +1,170 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER T min(T a, T b, T c) + { + return glm::min(glm::min(a, b), c); + } + + template + GLM_FUNC_QUALIFIER T min(T a, T b, T c, T d) + { + return glm::min(glm::min(a, b), glm::min(c, d)); + } + + template + GLM_FUNC_QUALIFIER T max(T a, T b, T c) + { + return glm::max(glm::max(a, b), c); + } + + template + GLM_FUNC_QUALIFIER T max(T a, T b, T c, T d) + { + return glm::max(glm::max(a, b), glm::max(c, d)); + } + +# if GLM_HAS_CXX11_STL + using std::fmin; +# else + template + GLM_FUNC_QUALIFIER T fmin(T a, T b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmin' only accept floating-point input"); + + if (isnan(a)) + return b; + return min(a, b); + } +# endif + + template + GLM_FUNC_QUALIFIER T fmin(T a, T b, T c) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmin' only accept floating-point input"); + + if (isnan(a)) + return fmin(b, c); + if (isnan(b)) + return fmin(a, c); + if (isnan(c)) + return min(a, b); + return min(a, b, c); + } + + template + GLM_FUNC_QUALIFIER T fmin(T a, T b, T c, T d) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmin' only accept floating-point input"); + + if (isnan(a)) + return fmin(b, c, d); + if (isnan(b)) + return min(a, fmin(c, d)); + if (isnan(c)) + return fmin(min(a, b), d); + if (isnan(d)) + return min(a, b, c); + return min(a, b, c, d); + } + + +# if GLM_HAS_CXX11_STL + using std::fmax; +# else + template + GLM_FUNC_QUALIFIER T fmax(T a, T b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmax' only accept floating-point input"); + + if (isnan(a)) + return b; + return max(a, b); + } +# endif + + template + GLM_FUNC_QUALIFIER T fmax(T a, T b, T c) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmax' only accept floating-point input"); + + if (isnan(a)) + return fmax(b, c); + if (isnan(b)) + return fmax(a, c); + if (isnan(c)) + return max(a, b); + return max(a, b, c); + } + + template + GLM_FUNC_QUALIFIER T fmax(T a, T b, T c, T d) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmax' only accept floating-point input"); + + if (isnan(a)) + return fmax(b, c, d); + if (isnan(b)) + return max(a, fmax(c, d)); + if (isnan(c)) + return fmax(max(a, b), d); + if (isnan(d)) + return max(a, b, c); + return max(a, b, c, d); + } + + // fclamp + template + GLM_FUNC_QUALIFIER genType fclamp(genType x, genType minVal, genType maxVal) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fclamp' only accept floating-point or integer inputs"); + return fmin(fmax(x, minVal), maxVal); + } + + template + GLM_FUNC_QUALIFIER genType clamp(genType const& Texcoord) + { + return glm::clamp(Texcoord, static_cast(0), static_cast(1)); + } + + template + GLM_FUNC_QUALIFIER genType repeat(genType const& Texcoord) + { + return glm::fract(Texcoord); + } + + template + 
GLM_FUNC_QUALIFIER genType mirrorClamp(genType const& Texcoord) + { + return glm::fract(glm::abs(Texcoord)); + } + + template + GLM_FUNC_QUALIFIER genType mirrorRepeat(genType const& Texcoord) + { + genType const Abs = glm::abs(Texcoord); + genType const Clamp = glm::mod(glm::floor(Abs), static_cast(2)); + genType const Floor = glm::floor(Abs); + genType const Rest = Abs - Floor; + genType const Mirror = Clamp + Rest; + return mix(Rest, static_cast(1) - Rest, Mirror >= static_cast(1)); + } + + template + GLM_FUNC_QUALIFIER int iround(genType const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'iround' only accept floating-point inputs"); + assert(static_cast(0.0) <= x); + + return static_cast(x + static_cast(0.5)); + } + + template + GLM_FUNC_QUALIFIER uint uround(genType const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'uround' only accept floating-point inputs"); + assert(static_cast(0.0) <= x); + + return static_cast(x + static_cast(0.5)); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_constants.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_constants.hpp new file mode 100644 index 000000000000..74e210d9c09e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_constants.hpp @@ -0,0 +1,40 @@ +/// @ref ext_scalar_constants +/// @file glm/ext/scalar_constants.hpp +/// +/// @defgroup ext_scalar_constants GLM_EXT_scalar_constants +/// @ingroup ext +/// +/// Provides a list of constants and precomputed useful values. +/// +/// Include to use the features of this extension. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_constants extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_scalar_constants + /// @{ + + /// Return the epsilon constant for floating point types. + template + GLM_FUNC_DECL GLM_CONSTEXPR genType epsilon(); + + /// Return the pi constant for floating point types. + template + GLM_FUNC_DECL GLM_CONSTEXPR genType pi(); + + /// Return the value of cos(1 / 2) for floating point types. 
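// (Aside, not vendored code: worked values for the wrap and rounding helpers
// implemented above, assuming GLM is on the include path.)
#include <glm/ext/scalar_common.hpp>

void wrap_round_sketch()
{
	float m0 = glm::mirrorRepeat(0.25f);  // 0.25f: even period, passes through
	float m1 = glm::mirrorRepeat(1.25f);  // 0.75f: odd period, mirrored

	int          i = glm::iround(2.5f);   // 3: rounds half up; requires x >= 0
	unsigned int u = glm::uround(2.5f);   // 3: same rule for the unsigned variant
}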
+ template + GLM_FUNC_DECL GLM_CONSTEXPR genType cos_one_over_two(); + + /// @} +} //namespace glm + +#include "scalar_constants.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_constants.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_constants.inl new file mode 100644 index 000000000000..b928e5118bff --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_constants.inl @@ -0,0 +1,24 @@ +#include + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType epsilon() + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'epsilon' only accepts floating-point inputs"); + return std::numeric_limits::epsilon(); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType pi() + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'pi' only accepts floating-point inputs"); + return static_cast(3.14159265358979323846264338327950288); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType cos_one_over_two() + { + return genType(0.877582561890372716130286068203503191); + } +} //namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_int_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_int_sized.hpp new file mode 100644 index 000000000000..8e9c511c9cb5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_int_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_scalar_int_sized +/// @file glm/ext/scalar_int_sized.hpp +/// +/// @defgroup ext_scalar_int_sized GLM_EXT_scalar_int_sized +/// @ingroup ext +/// +/// Exposes sized signed integer scalar types. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_uint_sized + +#pragma once + +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_int_sized extension included") +#endif + +namespace glm{ +namespace detail +{ +# if GLM_HAS_EXTENDED_INTEGER_TYPE + typedef std::int8_t int8; + typedef std::int16_t int16; + typedef std::int32_t int32; +# else + typedef signed char int8; + typedef signed short int16; + typedef signed int int32; +#endif// + + template<> + struct is_int + { + enum test {value = ~0}; + }; + + template<> + struct is_int + { + enum test {value = ~0}; + }; + + template<> + struct is_int + { + enum test {value = ~0}; + }; +}//namespace detail + + + /// @addtogroup ext_scalar_int_sized + /// @{ + + /// 8 bit signed integer type. + typedef detail::int8 int8; + + /// 16 bit signed integer type. + typedef detail::int16 int16; + + /// 32 bit signed integer type. + typedef detail::int32 int32; + + /// 64 bit signed integer type. + typedef detail::int64 int64; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_integer.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_integer.hpp new file mode 100644 index 000000000000..a2ca8a2ae37c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_integer.hpp @@ -0,0 +1,92 @@ +/// @ref ext_scalar_integer +/// @file glm/ext/scalar_integer.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_scalar_integer GLM_EXT_scalar_integer +/// @ingroup ext +/// +/// Include to use the features of this extension. 
+ +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/_vectorize.hpp" +#include "../detail/type_float.hpp" +#include "../vector_relational.hpp" +#include "../common.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_integer extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_scalar_integer + /// @{ + + /// Return true if the value is a power of two number. + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL bool isPowerOfTwo(genIUType v); + + /// Return the power of two number which value is just higher the input value, + /// round up to a power of two. + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL genIUType nextPowerOfTwo(genIUType v); + + /// Return the power of two number which value is just lower the input value, + /// round down to a power of two. + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL genIUType prevPowerOfTwo(genIUType v); + + /// Return true if the 'Value' is a multiple of 'Multiple'. + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL bool isMultiple(genIUType v, genIUType Multiple); + + /// Higher multiple number of Source. + /// + /// @tparam genIUType Integer scalar or vector types. + /// + /// @param v Source value to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL genIUType nextMultiple(genIUType v, genIUType Multiple); + + /// Lower multiple number of Source. + /// + /// @tparam genIUType Integer scalar or vector types. + /// + /// @param v Source value to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL genIUType prevMultiple(genIUType v, genIUType Multiple); + + /// Returns the bit number of the Nth significant bit set to + /// 1 in the binary representation of value. + /// If value bitcount is less than the Nth significant bit, -1 will be returned. + /// + /// @tparam genIUType Signed or unsigned integer scalar types. 
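// (Aside, not vendored code: worked values for the declarations above,
// assuming GLM is on the include path.)
#include <glm/ext/scalar_integer.hpp>

void scalar_integer_sketch()
{
	bool p  = glm::isPowerOfTwo(64);     // true
	int  np = glm::nextPowerOfTwo(37);   // 64: round up to a power of two
	int  pp = glm::prevPowerOfTwo(37);   // 32: round down to a power of two

	bool m  = glm::isMultiple(12, 4);    // true
	int  nm = glm::nextMultiple(13, 4);  // 16: next multiple of 4 at or above 13
	int  pm = glm::prevMultiple(13, 4);  // 12: previous multiple of 4 at or below 13
}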
+ /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL int findNSB(genIUType x, int significantBitCount); + + /// @} +} //namespace glm + +#include "scalar_integer.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_integer.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_integer.inl new file mode 100644 index 000000000000..d416197e8667 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_integer.inl @@ -0,0 +1,243 @@ +#include "../integer.hpp" + +namespace glm{ +namespace detail +{ + template + struct compute_ceilShift + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T) + { + return v; + } + }; + + template + struct compute_ceilShift + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T Shift) + { + return v | (v >> Shift); + } + }; + + template + struct compute_ceilPowerOfTwo + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + GLM_STATIC_ASSERT(!std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'ceilPowerOfTwo' only accept integer scalar or vector inputs"); + + vec const Sign(sign(x)); + + vec v(abs(x)); + + v = v - static_cast(1); + v = v | (v >> static_cast(1)); + v = v | (v >> static_cast(2)); + v = v | (v >> static_cast(4)); + v = compute_ceilShift= 2>::call(v, 8); + v = compute_ceilShift= 4>::call(v, 16); + v = compute_ceilShift= 8>::call(v, 32); + return (v + static_cast(1)) * Sign; + } + }; + + template + struct compute_ceilPowerOfTwo + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + GLM_STATIC_ASSERT(!std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'ceilPowerOfTwo' only accept integer scalar or vector inputs"); + + vec v(x); + + v = v - static_cast(1); + v = v | (v >> static_cast(1)); + v = v | (v >> static_cast(2)); + v = v | (v >> static_cast(4)); + v = compute_ceilShift= 2>::call(v, 8); + v = compute_ceilShift= 4>::call(v, 16); + v = compute_ceilShift= 8>::call(v, 32); + return v + static_cast(1); + } + }; + + template + struct compute_ceilMultiple{}; + + template<> + struct compute_ceilMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if(Source > genType(0)) + return Source + (Multiple - std::fmod(Source, Multiple)); + else + return Source + std::fmod(-Source, Multiple); + } + }; + + template<> + struct compute_ceilMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + genType Tmp = Source - genType(1); + return Tmp + (Multiple - (Tmp % Multiple)); + } + }; + + template<> + struct compute_ceilMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + assert(Multiple > genType(0)); + if(Source > genType(0)) + { + genType Tmp = Source - genType(1); + return Tmp + (Multiple - (Tmp % Multiple)); + } + else + return Source + (-Source % Multiple); + } + }; + + template + struct compute_floorMultiple{}; + + template<> + struct compute_floorMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if(Source >= genType(0)) + return Source - std::fmod(Source, Multiple); + else + return Source - std::fmod(Source, Multiple) - Multiple; + } + }; + + template<> + struct compute_floorMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if(Source >= genType(0)) + return Source - Source % Multiple; + else + { + genType Tmp = Source + genType(1); + return Tmp - Tmp % Multiple - Multiple; + } + } + }; + + template<> + struct 
compute_floorMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if(Source >= genType(0)) + return Source - Source % Multiple; + else + { + genType Tmp = Source + genType(1); + return Tmp - Tmp % Multiple - Multiple; + } + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER bool isPowerOfTwo(genIUType Value) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isPowerOfTwo' only accept integer inputs"); + + genIUType const Result = glm::abs(Value); + return !(Result & (Result - 1)); + } + + template + GLM_FUNC_QUALIFIER genIUType nextPowerOfTwo(genIUType value) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextPowerOfTwo' only accept integer inputs"); + + return detail::compute_ceilPowerOfTwo<1, genIUType, defaultp, std::numeric_limits::is_signed>::call(vec<1, genIUType, defaultp>(value)).x; + } + + template + GLM_FUNC_QUALIFIER genIUType prevPowerOfTwo(genIUType value) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevPowerOfTwo' only accept integer inputs"); + + return isPowerOfTwo(value) ? value : static_cast(static_cast(1) << static_cast(findMSB(value))); + } + + template + GLM_FUNC_QUALIFIER bool isMultiple(genIUType Value, genIUType Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isMultiple' only accept integer inputs"); + + return isMultiple(vec<1, genIUType>(Value), vec<1, genIUType>(Multiple)).x; + } + + template + GLM_FUNC_QUALIFIER genIUType nextMultiple(genIUType Source, genIUType Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextMultiple' only accept integer inputs"); + + return detail::compute_ceilMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); + } + + template + GLM_FUNC_QUALIFIER genIUType prevMultiple(genIUType Source, genIUType Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevMultiple' only accept integer inputs"); + + return detail::compute_floorMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); + } + + template + GLM_FUNC_QUALIFIER int findNSB(genIUType x, int significantBitCount) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findNSB' only accept integer inputs"); + + if(bitCount(x) < significantBitCount) + return -1; + + genIUType const One = static_cast(1); + int bitPos = 0; + + genIUType key = x; + int nBitCount = significantBitCount; + int Step = sizeof(x) * 8 / 2; + while (key > One) + { + genIUType Mask = static_cast((One << Step) - One); + genIUType currentKey = key & Mask; + int currentBitCount = bitCount(currentKey); + if (nBitCount > currentBitCount) + { + nBitCount -= currentBitCount; + bitPos += Step; + key >>= static_cast(Step); + } + else + { + key = key & Mask; + } + + Step >>= 1; + } + + return static_cast(bitPos); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_packing.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_packing.hpp new file mode 100644 index 000000000000..18b85b72a404 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_packing.hpp @@ -0,0 +1,32 @@ +/// @ref ext_scalar_packing +/// @file glm/ext/scalar_packing.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_scalar_packing GLM_EXT_scalar_packing +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// This extension provides a set of function to convert scalar values to packed +/// formats. 
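// (Aside, not vendored code: findNSB() above binary-searches for the Nth set
// bit, counting set bits from the least significant end. Worked example,
// assuming GLM is on the include path.)
#include <glm/ext/scalar_integer.hpp>

void find_nsb_sketch()
{
	// 0xB4 = 0b10110100 has set bits at positions 2, 4, 5 and 7 (LSB = 0).
	int second = glm::findNSB(0xB4u, 2);  // 4: position of the 2nd set bit
	int none   = glm::findNSB(0xB4u, 5);  // -1: only four bits are set
}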
+
+#pragma once
+
+// Dependency:
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#    pragma message("GLM: GLM_EXT_scalar_packing extension included")
+#endif
+
+namespace glm
+{
+    /// @addtogroup ext_scalar_packing
+    /// @{
+
+
+    /// @}
+}// namespace glm
+
+#include "scalar_packing.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_packing.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_packing.inl
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_reciprocal.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_reciprocal.hpp
new file mode 100644
index 000000000000..1c7b81dde09a
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_reciprocal.hpp
@@ -0,0 +1,135 @@
+/// @ref ext_scalar_reciprocal
+/// @file glm/ext/scalar_reciprocal.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_scalar_reciprocal GLM_EXT_scalar_reciprocal
+/// @ingroup ext
+///
+/// Include to use the features of this extension.
+///
+/// Define secant, cosecant and cotangent functions.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#    pragma message("GLM: GLM_EXT_scalar_reciprocal extension included")
+#endif
+
+namespace glm
+{
+    /// @addtogroup ext_scalar_reciprocal
+    /// @{
+
+    /// Secant function.
+    /// hypotenuse / adjacent or 1 / cos(x)
+    ///
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType sec(genType angle);
+
+    /// Cosecant function.
+    /// hypotenuse / opposite or 1 / sin(x)
+    ///
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType csc(genType angle);
+
+    /// Cotangent function.
+    /// adjacent / opposite or 1 / tan(x)
+    ///
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType cot(genType angle);
+
+    /// Inverse secant function.
+    ///
+    /// @return Return an angle expressed in radians.
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType asec(genType x);
+
+    /// Inverse cosecant function.
+    ///
+    /// @return Return an angle expressed in radians.
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType acsc(genType x);
+
+    /// Inverse cotangent function.
+    ///
+    /// @return Return an angle expressed in radians.
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType acot(genType x);
+
+    /// Secant hyperbolic function.
+    ///
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType sech(genType angle);
+
+    /// Cosecant hyperbolic function.
+    ///
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType csch(genType angle);
+
+    /// Cotangent hyperbolic function.
+    ///
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType coth(genType angle);
+
+    /// Inverse secant hyperbolic function.
+    ///
+    /// @return Return an angle expressed in radians.
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType asech(genType x);
+
+    /// Inverse cosecant hyperbolic function.
+    ///
+    /// @return Return an angle expressed in radians.
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType acsch(genType x);
+
+    /// Inverse cotangent hyperbolic function.
+    ///
+    /// @return Return an angle expressed in radians.
+    /// @tparam genType Floating-point scalar or vector types.
+    ///
+    /// @see ext_scalar_reciprocal
+    template<typename genType>
+    GLM_FUNC_DECL genType acoth(genType x);
+
+    /// @}
+}//namespace glm
+
+#include "scalar_reciprocal.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_reciprocal.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_reciprocal.inl
new file mode 100644
index 000000000000..0cd5f87b4e48
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_reciprocal.inl
@@ -0,0 +1,107 @@
+/// @ref ext_scalar_reciprocal
+
+#include "../trigonometric.hpp"
+#include <limits>
+
+namespace glm
+{
+    // sec
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType sec(genType angle)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'sec' only accept floating-point values");
+        return genType(1) / glm::cos(angle);
+    }
+
+    // csc
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType csc(genType angle)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'csc' only accept floating-point values");
+        return genType(1) / glm::sin(angle);
+    }
+
+    // cot
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType cot(genType angle)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'cot' only accept floating-point values");
+
+        genType const pi_over_2 = genType(3.1415926535897932384626433832795 / 2.0);
+        return glm::tan(pi_over_2 - angle);
+    }
+
+    // asec
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType asec(genType x)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'asec' only accept floating-point values");
+        return acos(genType(1) / x);
+    }
+
+    // acsc
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType acsc(genType x)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acsc' only accept floating-point values");
+        return asin(genType(1) / x);
+    }
+
+    // acot
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType acot(genType x)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acot' only accept floating-point values");
+
+        genType const pi_over_2 = genType(3.1415926535897932384626433832795 / 2.0);
+        return pi_over_2 - atan(x);
+    }
+
+    // sech
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType sech(genType angle)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'sech' only accept floating-point values");
+        return genType(1) / glm::cosh(angle);
+    }
+
+    // csch
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType csch(genType angle)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'csch' only accept floating-point values");
+        return genType(1) / glm::sinh(angle);
+    }
+
+    // coth
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType coth(genType angle)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'coth' only accept floating-point values");
+        return glm::cosh(angle) / glm::sinh(angle);
+    }
+
+    // asech
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType asech(genType x)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'asech' only accept floating-point values");
+        return acosh(genType(1) / x);
+    }
+
+    // acsch
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType acsch(genType x)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acsch' only accept floating-point values");
+        return asinh(genType(1) / x);
+    }
+
+    // acoth
+    template<typename genType>
+    GLM_FUNC_QUALIFIER genType acoth(genType x)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acoth' only accept floating-point values");
+        return atanh(genType(1) / x);
+    }
+}//namespace glm
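Editor's note: the extension above is a thin wrapper over the core trigonometric functions, so its behavior is easy to sanity-check. A minimal standalone sketch (the include path assumes this patch's vendored layout and is illustrative, not part of the patch):

    #include <cassert>
    #include <cmath>
    #include "thirdparty/manifold/thirdparty/glm/glm/ext/scalar_reciprocal.hpp"

    int main() {
        double angle = 0.5;
        // sec(x) is defined as 1 / cos(x); cot(x) is implemented as tan(pi/2 - x).
        assert(std::abs(glm::sec(angle) - 1.0 / std::cos(angle)) < 1e-12);
        assert(std::abs(glm::cot(angle) - 1.0 / std::tan(angle)) < 1e-12);
        // acot is pi/2 - atan(x), so acot(1) == pi/4.
        assert(std::abs(glm::acot(1.0) - 3.14159265358979323846 / 4.0) < 1e-12);
        return 0;
    }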
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_relational.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_relational.hpp
new file mode 100644
index 000000000000..e84df1786824
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_relational.hpp
@@ -0,0 +1,68 @@
+/// @ref ext_scalar_relational
+/// @file glm/ext/scalar_relational.hpp
+///
+/// @defgroup ext_scalar_relational GLM_EXT_scalar_relational
+/// @ingroup ext
+///
+/// Exposes comparison functions for scalar types that take a user defined epsilon values.
+///
+/// Include to use the features of this extension.
+///
+/// @see core_vector_relational
+/// @see ext_vector_relational
+/// @see ext_matrix_relational
+
+#pragma once
+
+// Dependencies
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#    pragma message("GLM: GLM_EXT_scalar_relational extension included")
+#endif
+
+namespace glm
+{
+    /// @addtogroup ext_scalar_relational
+    /// @{
+
+    /// Returns the component-wise comparison of |x - y| < epsilon.
+    /// True if this expression is satisfied.
+    ///
+    /// @tparam genType Floating-point or integer scalar types
+    template<typename genType>
+    GLM_FUNC_DECL GLM_CONSTEXPR bool equal(genType const& x, genType const& y, genType const& epsilon);
+
+    /// Returns the component-wise comparison of |x - y| >= epsilon.
+    /// True if this expression is not satisfied.
+    ///
+    /// @tparam genType Floating-point or integer scalar types
+    template<typename genType>
+    GLM_FUNC_DECL GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, genType const& epsilon);
+
+    /// Returns the component-wise comparison between two scalars in term of ULPs.
+    /// True if this expression is satisfied.
+    ///
+    /// @param x First operand.
+    /// @param y Second operand.
+    /// @param ULPs Maximum difference in ULPs between the two operators to consider them equal.
+    ///
+    /// @tparam genType Floating-point or integer scalar types
+    template<typename genType>
+    GLM_FUNC_DECL GLM_CONSTEXPR bool equal(genType const& x, genType const& y, int ULPs);
+
+    /// Returns the component-wise comparison between two scalars in term of ULPs.
+    /// True if this expression is not satisfied.
+    ///
+    /// @param x First operand.
+    /// @param y Second operand.
+    /// @param ULPs Maximum difference in ULPs between the two operators to consider them not equal.
+    ///
+    /// @tparam genType Floating-point or integer scalar types
+    template<typename genType>
+    GLM_FUNC_DECL GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, int ULPs);
+
+    /// @}
+}//namespace glm
+
+#include "scalar_relational.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_relational.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_relational.inl
new file mode 100644
index 000000000000..c85583ef5bbd
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_relational.inl
@@ -0,0 +1,40 @@
+#include "../common.hpp"
+#include "../ext/scalar_int_sized.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+#include "../detail/type_float.hpp"
+
+namespace glm
+{
+    template<typename genType>
+    GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool equal(genType const& x, genType const& y, genType const& epsilon)
+    {
+        return abs(x - y) <= epsilon;
+    }
+
+    template<typename genType>
+    GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, genType const& epsilon)
+    {
+        return abs(x - y) > epsilon;
+    }
+
+    template<typename genType>
+    GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool equal(genType const& x, genType const& y, int MaxULPs)
+    {
+        detail::float_t<genType> const a(x);
+        detail::float_t<genType> const b(y);
+
+        // Different signs means they do not match.
+        if(a.negative() != b.negative())
+            return false;
+
+        // Find the difference in ULPs.
+        typename detail::float_t<genType>::int_type const DiffULPs = abs(a.i - b.i);
+        return DiffULPs <= MaxULPs;
+    }
+
+    template<typename genType>
+    GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, int ULPs)
+    {
+        return !equal(x, y, ULPs);
+    }
+}//namespace glm
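Editor's note: the two `equal` overloads above implement different notions of closeness, selected purely by the type of the third argument, which is worth seeing side by side. A minimal sketch (vendored include path assumed, not part of the patch):

    #include <cstdio>
    #include "thirdparty/manifold/thirdparty/glm/glm/ext/scalar_relational.hpp"

    int main() {
        double a = 0.1 + 0.2;   // 0.30000000000000004..., one representable double above b
        double b = 0.3;
        // Third argument is a double: absolute-epsilon overload, fixed tolerance.
        std::printf("epsilon: %d\n", glm::equal(a, b, 1e-12));  // 1 (|a-b| ~ 5.5e-17)
        // Third argument is an int: ULP overload, tolerance scales with float density.
        std::printf("ulp:     %d\n", glm::equal(a, b, 1));      // 1 (exactly 1 ULP apart)
        std::printf("ulp0:    %d\n", glm::equal(a, b, 0));      // 0 (not bit-identical)
        return 0;
    }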
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_uint_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_uint_sized.hpp
new file mode 100644
index 000000000000..fd5267fad7c1
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_uint_sized.hpp
@@ -0,0 +1,70 @@
+/// @ref ext_scalar_uint_sized
+/// @file glm/ext/scalar_uint_sized.hpp
+///
+/// @defgroup ext_scalar_uint_sized GLM_EXT_scalar_uint_sized
+/// @ingroup ext
+///
+/// Exposes sized unsigned integer scalar types.
+///
+/// Include to use the features of this extension.
+///
+/// @see ext_scalar_int_sized
+
+#pragma once
+
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#    pragma message("GLM: GLM_EXT_scalar_uint_sized extension included")
+#endif
+
+namespace glm{
+namespace detail
+{
+#    if GLM_HAS_EXTENDED_INTEGER_TYPE
+        typedef std::uint8_t uint8;
+        typedef std::uint16_t uint16;
+        typedef std::uint32_t uint32;
+#    else
+        typedef unsigned char uint8;
+        typedef unsigned short uint16;
+        typedef unsigned int uint32;
+#endif
+
+    template<>
+    struct is_int<uint8>
+    {
+        enum test {value = ~0};
+    };
+
+    template<>
+    struct is_int<uint16>
+    {
+        enum test {value = ~0};
+    };
+
+    template<>
+    struct is_int<uint64>
+    {
+        enum test {value = ~0};
+    };
+}//namespace detail
+
+
+    /// @addtogroup ext_scalar_uint_sized
+    /// @{
+
+    /// 8 bit unsigned integer type.
+    typedef detail::uint8 uint8;
+
+    /// 16 bit unsigned integer type.
+    typedef detail::uint16 uint16;
+
+    /// 32 bit unsigned integer type.
+    typedef detail::uint32 uint32;
+
+    /// 64 bit unsigned integer type.
+    typedef detail::uint64 uint64;
+
+    /// @}
+}//namespace glm
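Editor's note: the value of these typedefs is the exact-width guarantee across platforms, which can be checked at compile time. A short sketch (vendored include path assumed, not part of the patch):

    #include <cstdio>
    #include "thirdparty/manifold/thirdparty/glm/glm/ext/scalar_uint_sized.hpp"

    int main() {
        // The sized typedefs are exact widths, not "at least" widths.
        static_assert(sizeof(glm::uint8) == 1, "uint8 must be 1 byte");
        static_assert(sizeof(glm::uint16) == 2, "uint16 must be 2 bytes");
        static_assert(sizeof(glm::uint32) == 4, "uint32 must be 4 bytes");
        glm::uint16 u = 65535;  // largest 16-bit value
        std::printf("%u\n", static_cast<unsigned>(u));
        return 0;
    }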
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_ulp.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_ulp.hpp
new file mode 100644
index 000000000000..6344d95bf23c
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_ulp.hpp
@@ -0,0 +1,77 @@
+/// @ref ext_scalar_ulp
+/// @file glm/ext/scalar_ulp.hpp
+///
+/// @defgroup ext_scalar_ulp GLM_EXT_scalar_ulp
+/// @ingroup ext
+///
+/// Allow the measurement of the accuracy of a function against a reference
+/// implementation. This extension works on floating-point data and provide results
+/// in ULP.
+///
+/// Include to use the features of this extension.
+///
+/// @see ext_vector_ulp
+/// @see ext_scalar_relational
+
+#pragma once
+
+// Dependencies
+#include "../ext/scalar_int_sized.hpp"
+#include "../common.hpp"
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#    pragma message("GLM: GLM_EXT_scalar_ulp extension included")
+#endif
+
+namespace glm
+{
+    /// @addtogroup ext_scalar_ulp
+    /// @{
+
+    /// Return the next ULP value(s) after the input value(s).
+    ///
+    /// @tparam genType A floating-point scalar type.
+    ///
+    /// @see ext_scalar_ulp
+    template<typename genType>
+    GLM_FUNC_DECL genType nextFloat(genType x);
+
+    /// Return the previous ULP value(s) before the input value(s).
+    ///
+    /// @tparam genType A floating-point scalar type.
+    ///
+    /// @see ext_scalar_ulp
+    template<typename genType>
+    GLM_FUNC_DECL genType prevFloat(genType x);
+
+    /// Return the value(s) ULP distance after the input value(s).
+    ///
+    /// @tparam genType A floating-point scalar type.
+    ///
+    /// @see ext_scalar_ulp
+    template<typename genType>
+    GLM_FUNC_DECL genType nextFloat(genType x, int ULPs);
+
+    /// Return the value(s) ULP distance before the input value(s).
+    ///
+    /// @tparam genType A floating-point scalar type.
+    ///
+    /// @see ext_scalar_ulp
+    template<typename genType>
+    GLM_FUNC_DECL genType prevFloat(genType x, int ULPs);
+
+    /// Return the distance in the number of ULP between 2 single-precision floating-point scalars.
+    ///
+    /// @see ext_scalar_ulp
+    GLM_FUNC_DECL int floatDistance(float x, float y);
+
+    /// Return the distance in the number of ULP between 2 double-precision floating-point scalars.
+    ///
+    /// @see ext_scalar_ulp
+    GLM_FUNC_DECL int64 floatDistance(double x, double y);
+
+    /// @}
+}//namespace glm
+
+#include "scalar_ulp.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_ulp.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_ulp.inl
new file mode 100644
index 000000000000..919403fd6ad3
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/scalar_ulp.inl
@@ -0,0 +1,284 @@
+/// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+///
+/// Developed at SunPro, a Sun Microsystems, Inc. business.
+/// Permission to use, copy, modify, and distribute this
+/// software is freely granted, provided that this notice
+/// is preserved.
+
+#include "../detail/type_float.hpp"
+#include "../ext/scalar_constants.hpp"
+#include <cmath>
+#include <cfloat>
+
+#if(GLM_COMPILER & GLM_COMPILER_VC)
+#    pragma warning(push)
+#    pragma warning(disable : 4127)
+#endif
+
+typedef union
+{
+    float value;
+    /* FIXME: Assumes 32 bit int.  */
+    unsigned int word;
+} ieee_float_shape_type;
+
+typedef union
+{
+    double value;
+    struct
+    {
+        int lsw;
+        int msw;
+    } parts;
+} ieee_double_shape_type;
+
+#define GLM_EXTRACT_WORDS(ix0,ix1,d) \
+    do { \
+        ieee_double_shape_type ew_u; \
+        ew_u.value = (d); \
+        (ix0) = ew_u.parts.msw; \
+        (ix1) = ew_u.parts.lsw; \
+    } while (0)
+
+#define GLM_GET_FLOAT_WORD(i,d) \
+    do { \
+        ieee_float_shape_type gf_u; \
+        gf_u.value = (d); \
+        (i) = gf_u.word; \
+    } while (0)
+
+#define GLM_SET_FLOAT_WORD(d,i) \
+    do { \
+        ieee_float_shape_type sf_u; \
+        sf_u.word = (i); \
+        (d) = sf_u.value; \
+    } while (0)
+
+#define GLM_INSERT_WORDS(d,ix0,ix1) \
+    do { \
+        ieee_double_shape_type iw_u; \
+        iw_u.parts.msw = (ix0); \
+        iw_u.parts.lsw = (ix1); \
+        (d) = iw_u.value; \
+    } while (0)
+
+namespace glm{
+namespace detail
+{
+    GLM_FUNC_QUALIFIER float nextafterf(float x, float y)
+    {
+        volatile float t;
+        int hx, hy, ix, iy;
+
+        GLM_GET_FLOAT_WORD(hx, x);
+        GLM_GET_FLOAT_WORD(hy, y);
+        ix = hx & 0x7fffffff;    // |x|
+        iy = hy & 0x7fffffff;    // |y|
+
+        if((ix > 0x7f800000) ||    // x is nan
+            (iy > 0x7f800000))    // y is nan
+            return x + y;
+        if(abs(y - x) <= epsilon<float>())
+            return y;    // x=y, return y
+        if(ix == 0)
+        {    // x == 0
+            GLM_SET_FLOAT_WORD(x, (hy & 0x80000000) | 1);    // return +-minsubnormal
+            t = x * x;
+            if(abs(t - x) <= epsilon<float>())
+                return t;
+            else
+                return x;    // raise underflow flag
+        }
+        if(hx >= 0)
+        {    // x > 0
+            if(hx > hy)    // x > y, x -= ulp
+                hx -= 1;
+            else    // x < y, x += ulp
+                hx += 1;
+        }
+        else
+        {    // x < 0
+            if(hy >= 0 || hx > hy)    // x < y, x -= ulp
+                hx -= 1;
+            else    // x > y, x += ulp
+                hx += 1;
+        }
+        hy = hx & 0x7f800000;
+        if(hy >= 0x7f800000)
+            return x + x;    // overflow
+        if(hy < 0x00800000)    // underflow
+        {
+            t = x * x;
+            if(abs(t - x) > epsilon<float>())
+            {    // raise underflow flag
+                GLM_SET_FLOAT_WORD(y, hx);
+                return y;
+            }
+        }
+        GLM_SET_FLOAT_WORD(x, hx);
+        return x;
+    }
+
+    GLM_FUNC_QUALIFIER double nextafter(double x, double y)
+    {
+        volatile double t;
+        int hx, hy, ix, iy;
+        unsigned int lx, ly;
+
+        GLM_EXTRACT_WORDS(hx, lx, x);
+        GLM_EXTRACT_WORDS(hy, ly, y);
+        ix = hx & 0x7fffffff;    // |x|
+        iy = hy & 0x7fffffff;    // |y|
+
+        if(((ix >= 0x7ff00000) && ((ix - 0x7ff00000) | lx) != 0) ||    // x is nan
+            ((iy >= 0x7ff00000) && ((iy - 0x7ff00000) | ly) != 0))    // y is nan
+            return x + y;
+        if(abs(y - x) <= epsilon<double>())
+            return y;    // x=y, return y
+        if((ix | lx) == 0)
+        {    // x == 0
+            GLM_INSERT_WORDS(x, hy & 0x80000000, 1);    // return +-minsubnormal
+            t = x * x;
+            if(abs(t - x) <= epsilon<double>())
+                return t;
+            else
+                return x;    // raise underflow flag
+        }
+        if(hx >= 0) {    // x > 0
+            if(hx > hy || ((hx == hy) && (lx > ly))) {    // x > y, x -= ulp
+                if(lx == 0) hx -= 1;
+                lx -= 1;
+            }
+            else {    // x < y, x += ulp
+                lx += 1;
+                if(lx == 0) hx += 1;
+            }
+        }
+        else {    // x < 0
+            if(hy >= 0 || hx > hy || ((hx == hy) && (lx > ly))){    // x < y, x -= ulp
+                if(lx == 0) hx -= 1;
+                lx -= 1;
+            }
+            else {    // x > y, x += ulp
+                lx += 1;
+                if(lx == 0) hx += 1;
+            }
+        }
+        hy = hx & 0x7ff00000;
+        if(hy >= 0x7ff00000)
+            return x + x;    // overflow
+        if(hy < 0x00100000)
+        {    // underflow
+            t = x * x;
+            if(abs(t - x) > epsilon<double>())
+            {    // raise underflow flag
+                GLM_INSERT_WORDS(y, hx, lx);
+                return y;
+            }
+        }
+        GLM_INSERT_WORDS(x, hx, lx);
+        return x;
+    }
+}//namespace detail
+}//namespace glm
+
+#if(GLM_COMPILER & GLM_COMPILER_VC)
+#    pragma warning(pop)
+#endif
+
+namespace glm
+{
+    template<>
+    GLM_FUNC_QUALIFIER float nextFloat(float x)
+    {
+#        if GLM_HAS_CXX11_STL
+            return std::nextafter(x, std::numeric_limits<float>::max());
+#        elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+            return detail::nextafterf(x, FLT_MAX);
+#        elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+            return __builtin_nextafterf(x, FLT_MAX);
+#        else
+            return nextafterf(x, FLT_MAX);
+#        endif
+    }
+
+    template<>
+    GLM_FUNC_QUALIFIER double nextFloat(double x)
+    {
+#        if GLM_HAS_CXX11_STL
+            return std::nextafter(x, std::numeric_limits<double>::max());
+#        elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+            return detail::nextafter(x, std::numeric_limits<double>::max());
+#        elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+            return __builtin_nextafter(x, DBL_MAX);
+#        else
+            return nextafter(x, DBL_MAX);
+#        endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER T nextFloat(T x, int ULPs)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'next_float' only accept floating-point input");
+        assert(ULPs >= 0);
+
+        T temp = x;
+        for(int i = 0; i < ULPs; ++i)
+            temp = nextFloat(temp);
+        return temp;
+    }
+
+    GLM_FUNC_QUALIFIER float prevFloat(float x)
+    {
+#        if GLM_HAS_CXX11_STL
+            return std::nextafter(x, std::numeric_limits<float>::min());
+#        elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+            return detail::nextafterf(x, FLT_MIN);
+#        elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+            return __builtin_nextafterf(x, FLT_MIN);
+#        else
+            return nextafterf(x, FLT_MIN);
+#        endif
+    }
+
+    GLM_FUNC_QUALIFIER double prevFloat(double x)
+    {
+#        if GLM_HAS_CXX11_STL
+            return std::nextafter(x, std::numeric_limits<double>::min());
+#        elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+            return _nextafter(x, DBL_MIN);
+#        elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+            return __builtin_nextafter(x, DBL_MIN);
+#        else
+            return nextafter(x, DBL_MIN);
+#        endif
+    }
+
+    template<typename T>
+    GLM_FUNC_QUALIFIER T prevFloat(T x, int ULPs)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'prev_float' only accept floating-point input");
+        assert(ULPs >= 0);
+
+        T temp = x;
+        for(int i = 0; i < ULPs; ++i)
+            temp = prevFloat(temp);
+        return temp;
+    }
+
+    GLM_FUNC_QUALIFIER int floatDistance(float x, float y)
+    {
+        detail::float_t<float> const a(x);
+        detail::float_t<float> const b(y);
+
+        return abs(a.i - b.i);
+    }
+
+    GLM_FUNC_QUALIFIER int64 floatDistance(double x, double y)
+    {
+        detail::float_t<double> const a(x);
+        detail::float_t<double> const b(y);
+
+        return abs(a.i - b.i);
+    }
+}//namespace glm
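Editor's note: `nextFloat`/`prevFloat` step through adjacent representable values and `floatDistance` counts the steps, which makes the ULP machinery above easy to exercise. A minimal sketch (vendored include path assumed, not part of the patch):

    #include <cstdio>
    #include "thirdparty/manifold/thirdparty/glm/glm/ext/scalar_ulp.hpp"

    int main() {
        float x = 1.0f;
        // nextFloat returns the smallest representable float greater than x.
        float y = glm::nextFloat(x);      // 1.0f + 2^-23
        float z = glm::nextFloat(x, 4);   // four ULPs above x
        std::printf("gap at 1.0f: %.10g\n", y - x);
        // floatDistance counts how many representable floats separate two values.
        std::printf("distance: %d\n", glm::floatDistance(x, z));  // prints 4
        return 0;
    }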
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool1.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool1.hpp
new file mode 100644
index 000000000000..002c3202adf0
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool1.hpp
@@ -0,0 +1,30 @@
+/// @ref ext_vector_bool1
+/// @file glm/ext/vector_bool1.hpp
+///
+/// @defgroup ext_vector_bool1 GLM_EXT_vector_bool1
+/// @ingroup ext
+///
+/// Exposes bvec1 vector type.
+///
+/// Include to use the features of this extension.
+///
+/// @see ext_vector_bool1_precision extension.
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#    pragma message("GLM: GLM_EXT_vector_bool1 extension included")
+#endif
+
+namespace glm
+{
+    /// @addtogroup ext_vector_bool1
+    /// @{
+
+    /// 1 components vector of boolean.
+    typedef vec<1, bool, defaultp> bvec1;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool1_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool1_precision.hpp
new file mode 100644
index 000000000000..e62d3cfb5fd4
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool1_precision.hpp
@@ -0,0 +1,34 @@
+/// @ref ext_vector_bool1_precision
+/// @file glm/ext/vector_bool1_precision.hpp
+///
+/// @defgroup ext_vector_bool1_precision GLM_EXT_vector_bool1_precision
+/// @ingroup ext
+///
+/// Exposes highp_bvec1, mediump_bvec1 and lowp_bvec1 types.
+///
+/// Include to use the features of this extension.
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#    pragma message("GLM: GLM_EXT_vector_bool1_precision extension included")
+#endif
+
+namespace glm
+{
+    /// @addtogroup ext_vector_bool1_precision
+    /// @{
+
+    /// 1 component vector of bool values.
+    typedef vec<1, bool, highp> highp_bvec1;
+
+    /// 1 component vector of bool values.
+    typedef vec<1, bool, mediump> mediump_bvec1;
+
+    /// 1 component vector of bool values.
+    typedef vec<1, bool, lowp> lowp_bvec1;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool2.hpp
new file mode 100644
index 000000000000..52288b75c697
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool2.hpp
@@ -0,0 +1,18 @@
+/// @ref core
+/// @file glm/ext/vector_bool2.hpp
+
+#pragma once
+#include "../detail/type_vec2.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_vector
+    /// @{
+
+    /// 2 components vector of boolean.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors
+    typedef vec<2, bool, defaultp> bvec2;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool2_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool2_precision.hpp
new file mode 100644
index 000000000000..43709332c629
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool2_precision.hpp
@@ -0,0 +1,31 @@
+/// @ref core
+/// @file glm/ext/vector_bool2_precision.hpp
+
+#pragma once
+#include "../detail/type_vec2.hpp"
+
+namespace glm
+{
+    /// @addtogroup core_vector_precision
+    /// @{
+
+    /// 2 components vector of high qualifier bool numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef vec<2, bool, highp> highp_bvec2;
+
+    /// 2 components vector of medium qualifier bool numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef vec<2, bool, mediump> mediump_bvec2;
+
+    /// 2 components vector of low qualifier bool numbers.
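+    /// Editorial usage sketch (not part of upstream GLM):
+    /// @code
+    /// glm::lowp_bvec2 b(true, false);
+    /// bool anySet = glm::any(b); // core vector_relational.hpp
+    /// @endcode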
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, bool, lowp> lowp_bvec2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool3.hpp new file mode 100644 index 000000000000..90a0b7ea5ac0 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_bool3.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 3 components vector of boolean. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<3, bool, defaultp> bvec3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool3_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool3_precision.hpp new file mode 100644 index 000000000000..89cd2d3207a1 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool3_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_bool3_precision.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 3 components vector of high qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, bool, highp> highp_bvec3; + + /// 3 components vector of medium qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, bool, mediump> mediump_bvec3; + + /// 3 components vector of low qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, bool, lowp> lowp_bvec3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool4.hpp new file mode 100644 index 000000000000..18aa71bd0f49 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_bool4.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 4 components vector of boolean. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<4, bool, defaultp> bvec4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool4_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool4_precision.hpp new file mode 100644 index 000000000000..79786e54206b --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_bool4_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_bool4_precision.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 4 components vector of high qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, bool, highp> highp_bvec4; + + /// 4 components vector of medium qualifier bool numbers. 
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef vec<4, bool, mediump> mediump_bvec4;
+
+    /// 4 components vector of low qualifier bool numbers.
+    ///
+    /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors
+    /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier
+    typedef vec<4, bool, lowp> lowp_bvec4;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_common.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_common.hpp
new file mode 100644
index 000000000000..c0a2858cc288
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_common.hpp
@@ -0,0 +1,228 @@
+/// @ref ext_vector_common
+/// @file glm/ext/vector_common.hpp
+///
+/// @defgroup ext_vector_common GLM_EXT_vector_common
+/// @ingroup ext
+///
+/// Exposes min and max functions for 3 to 4 vector parameters.
+///
+/// Include to use the features of this extension.
+///
+/// @see core_common
+/// @see ext_scalar_common
+
+#pragma once
+
+// Dependency:
+#include "../ext/scalar_common.hpp"
+#include "../common.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#    pragma message("GLM: GLM_EXT_vector_common extension included")
+#endif
+
+namespace glm
+{
+    /// @addtogroup ext_vector_common
+    /// @{
+
+    /// Return the minimum component-wise values of 3 inputs
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point or integer scalar types
+    /// @tparam Q Value from qualifier enum
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c);
+
+    /// Return the minimum component-wise values of 4 inputs
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point or integer scalar types
+    /// @tparam Q Value from qualifier enum
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c, vec<L, T, Q> const& d);
+
+    /// Return the maximum component-wise values of 3 inputs
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point or integer scalar types
+    /// @tparam Q Value from qualifier enum
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z);
+
+    /// Return the maximum component-wise values of 4 inputs
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point or integer scalar types
+    /// @tparam Q Value from qualifier enum
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z, vec<L, T, Q> const& w);
+
+    /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see std::fmin documentation
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> fmin(vec<L, T, Q> const& x, T y);
+
+    /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see std::fmin documentation
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> fmin(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+    /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see std::fmin documentation
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> fmin(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c);
+
+    /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see std::fmin documentation
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> fmin(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c, vec<L, T, Q> const& d);
+
+    /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see std::fmax documentation
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> fmax(vec<L, T, Q> const& a, T b);
+
+    /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see std::fmax documentation
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b);
+
+    /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see std::fmax documentation
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c);
+
+    /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned.
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see std::fmax documentation
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c, vec<L, T, Q> const& d);
+
+    /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned.
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see ext_vector_common
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> fclamp(vec<L, T, Q> const& x, T minVal, T maxVal);
+
+    /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned.
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see ext_vector_common
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> fclamp(vec<L, T, Q> const& x, vec<L, T, Q> const& minVal, vec<L, T, Q> const& maxVal);
+
+    /// Simulate GL_CLAMP OpenGL wrap mode
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see ext_vector_common extension.
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> clamp(vec<L, T, Q> const& Texcoord);
+
+    /// Simulate GL_REPEAT OpenGL wrap mode
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see ext_vector_common extension.
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> repeat(vec<L, T, Q> const& Texcoord);
+
+    /// Simulate GL_MIRRORED_REPEAT OpenGL wrap mode
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see ext_vector_common extension.
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> mirrorClamp(vec<L, T, Q> const& Texcoord);
+
+    /// Simulate GL_MIRROR_REPEAT OpenGL wrap mode
+    ///
+    /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+    /// @tparam T Floating-point scalar types
+    /// @tparam Q Value from qualifier enum
+    ///
+    /// @see ext_vector_common extension.
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> mirrorRepeat(vec<L, T, Q> const& Texcoord);
+
+    /// Returns a value equal to the nearest integer to x.
+    /// The fraction 0.5 will round in a direction chosen by the
+    /// implementation, presumably the direction that is fastest.
+    ///
+    /// @param x The values of the argument must be greater or equal to zero.
+    /// @tparam T floating point scalar types.
+    ///
+    /// @see GLSL round man page
+    /// @see ext_vector_common extension.
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, int, Q> iround(vec<L, T, Q> const& x);
+
+    /// Returns a value equal to the nearest integer to x.
+    /// The fraction 0.5 will round in a direction chosen by the
+    /// implementation, presumably the direction that is fastest.
+    ///
+    /// @param x The values of the argument must be greater or equal to zero.
+    /// @tparam T floating point scalar types.
+    ///
+    /// @see GLSL round man page
+    /// @see ext_vector_common extension.
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, uint, Q> uround(vec<L, T, Q> const& x);
+
+    /// @}
+}//namespace glm
+
+#include "vector_common.inl"
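Editor's note: the wrap-mode helpers declared above mirror OpenGL texture addressing in pure arithmetic, which is easiest to see with a coordinate outside [0, 1]. A minimal sketch (vendored include paths assumed, not part of the patch):

    #include <cstdio>
    #include "thirdparty/manifold/thirdparty/glm/glm/glm.hpp"
    #include "thirdparty/manifold/thirdparty/glm/glm/ext/vector_common.hpp"

    int main() {
        glm::vec2 uv(1.25f, -0.75f);
        // repeat() wraps like GL_REPEAT: only the fractional part survives.
        glm::vec2 wrapped = glm::repeat(uv);         // (0.25, 0.25)
        // mirrorRepeat() folds every other period back on itself.
        glm::vec2 mirrored = glm::mirrorRepeat(uv);  // (0.75, 0.75)
        std::printf("repeat: (%g, %g)\n", wrapped.x, wrapped.y);
        std::printf("mirror: (%g, %g)\n", mirrored.x, mirrored.y);
        return 0;
    }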
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_common.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_common.inl
new file mode 100644
index 000000000000..67817fc554b0
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_common.inl
@@ -0,0 +1,147 @@
+#include "../detail/_vectorize.hpp"
+
+namespace glm
+{
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'min' only accept floating-point or integer inputs");
+        return glm::min(glm::min(x, y), z);
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z, vec<L, T, Q> const& w)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'min' only accept floating-point or integer inputs");
+        return glm::min(glm::min(x, y), glm::min(z, w));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'max' only accept floating-point or integer inputs");
+        return glm::max(glm::max(x, y), z);
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& z, vec<L, T, Q> const& w)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || std::numeric_limits<T>::is_integer, "'max' only accept floating-point or integer inputs");
+        return glm::max(glm::max(x, y), glm::max(z, w));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> fmin(vec<L, T, Q> const& a, T b)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point inputs");
+        return detail::functor2<vec, L, T, Q>::call(fmin, a, vec<L, T, Q>(b));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> fmin(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point inputs");
+        return detail::functor2<vec, L, T, Q>::call(fmin, a, b);
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> fmin(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point inputs");
+        return fmin(fmin(a, b), c);
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> fmin(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c, vec<L, T, Q> const& d)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmin' only accept floating-point inputs");
+        return fmin(fmin(a, b), fmin(c, d));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> fmax(vec<L, T, Q> const& a, T b)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point inputs");
+        return detail::functor2<vec, L, T, Q>::call(fmax, a, vec<L, T, Q>(b));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point inputs");
+        return detail::functor2<vec, L, T, Q>::call(fmax, a, b);
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point inputs");
+        return fmax(fmax(a, b), c);
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> fmax(vec<L, T, Q> const& a, vec<L, T, Q> const& b, vec<L, T, Q> const& c, vec<L, T, Q> const& d)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'fmax' only accept floating-point inputs");
+        return fmax(fmax(a, b), fmax(c, d));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> fclamp(vec<L, T, Q> const& x, T minVal, T maxVal)
+    {
+        return fmin(fmax(x, vec<L, T, Q>(minVal)), vec<L, T, Q>(maxVal));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> fclamp(vec<L, T, Q> const& x, vec<L, T, Q> const& minVal, vec<L, T, Q> const& maxVal)
+    {
+        return fmin(fmax(x, minVal), maxVal);
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> clamp(vec<L, T, Q> const& Texcoord)
+    {
+        return glm::clamp(Texcoord, vec<L, T, Q>(0), vec<L, T, Q>(1));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> repeat(vec<L, T, Q> const& Texcoord)
+    {
+        return glm::fract(Texcoord);
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> mirrorClamp(vec<L, T, Q> const& Texcoord)
+    {
+        return glm::fract(glm::abs(Texcoord));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, T, Q> mirrorRepeat(vec<L, T, Q> const& Texcoord)
+    {
+        vec<L, T, Q> const Abs = glm::abs(Texcoord);
+        vec<L, T, Q> const Clamp = glm::mod(glm::floor(Abs), vec<L, T, Q>(2));
+        vec<L, T, Q> const Floor = glm::floor(Abs);
+        vec<L, T, Q> const Rest = Abs - Floor;
+        vec<L, T, Q> const Mirror = Clamp + Rest;
+        return mix(Rest, vec<L, T, Q>(1) - Rest, glm::greaterThanEqual(Mirror, vec<L, T, Q>(1)));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, int, Q> iround(vec<L, T, Q> const& x)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'iround' only accept floating-point inputs");
+        assert(all(lessThanEqual(vec<L, T, Q>(0), x)));
+
+        return vec<L, int, Q>(x + static_cast<T>(0.5));
+    }
+
+    template<length_t L, typename T, qualifier Q>
+    GLM_FUNC_QUALIFIER vec<L, uint, Q> uround(vec<L, T, Q> const& x)
+    {
+        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'uround' only accept floating-point inputs");
+        assert(all(lessThanEqual(vec<L, T, Q>(0), x)));
+
+        return vec<L, uint, Q>(x + static_cast<T>(0.5));
+    }
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double1.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double1.hpp
new file mode 100644
index 000000000000..388266774d8b
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double1.hpp
@@ -0,0 +1,31 @@
+/// @ref ext_vector_double1
+/// @file glm/ext/vector_double1.hpp
+///
+/// @defgroup ext_vector_double1 GLM_EXT_vector_double1
+/// @ingroup ext
+///
+/// Exposes double-precision floating point vector type with one component.
+///
+/// Include to use the features of this extension.
+///
+/// @see ext_vector_double1_precision extension.
+/// @see ext_vector_float1 extension.
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#    pragma message("GLM: GLM_EXT_vector_double1 extension included")
+#endif
+
+namespace glm
+{
+    /// @addtogroup ext_vector_double1
+    /// @{
+
+    /// 1 components vector of double-precision floating-point numbers.
+    typedef vec<1, double, defaultp> dvec1;
+
+    /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double1_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double1_precision.hpp
new file mode 100644
index 000000000000..1d4719595481
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double1_precision.hpp
@@ -0,0 +1,36 @@
+/// @ref ext_vector_double1_precision
+/// @file glm/ext/vector_double1_precision.hpp
+///
+/// @defgroup ext_vector_double1_precision GLM_EXT_vector_double1_precision
+/// @ingroup ext
+///
+/// Exposes highp_dvec1, mediump_dvec1 and lowp_dvec1 types.
+///
+/// Include to use the features of this extension.
+///
+/// @see ext_vector_double1
+
+#pragma once
+
+#include "../detail/type_vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#    pragma message("GLM: GLM_EXT_vector_double1_precision extension included")
+#endif
+
+namespace glm
+{
+    /// @addtogroup ext_vector_double1_precision
+    /// @{
+
+    /// 1 component vector of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<1, double, highp> highp_dvec1; + + /// 1 component vector of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, double, mediump> mediump_dvec1; + + /// 1 component vector of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, double, lowp> lowp_dvec1; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double2.hpp new file mode 100644 index 000000000000..60e357750b67 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_double2.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 2 components vector of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<2, double, defaultp> dvec2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double2_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double2_precision.hpp new file mode 100644 index 000000000000..fa53940f6bbe --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_double2_precision.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 2 components vector of high double-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, double, highp> highp_dvec2; + + /// 2 components vector of medium double-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, double, mediump> mediump_dvec2; + + /// 2 components vector of low double-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, double, lowp> lowp_dvec2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double3.hpp new file mode 100644 index 000000000000..6dfe4c675b5d --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_double3.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 3 components vector of double-precision floating-point numbers. 
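+    /// Editorial usage sketch (not part of upstream GLM):
+    /// @code
+    /// glm::dvec3 p(1.0, 2.0, 2.0);
+    /// double len = glm::length(p); // 3.0, needs glm/geometric.hpp
+    /// @endcode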
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<3, double, defaultp> dvec3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double3_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double3_precision.hpp new file mode 100644 index 000000000000..a8cfa37a8c78 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double3_precision.hpp @@ -0,0 +1,34 @@ +/// @ref core +/// @file glm/ext/vector_double3_precision.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 3 components vector of high double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, double, highp> highp_dvec3; + + /// 3 components vector of medium double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, double, mediump> mediump_dvec3; + + /// 3 components vector of low double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, double, lowp> lowp_dvec3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double4.hpp new file mode 100644 index 000000000000..87f225f64d4a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_double4.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 4 components vector of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<4, double, defaultp> dvec4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double4_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double4_precision.hpp new file mode 100644 index 000000000000..09cafa1ebafd --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_double4_precision.hpp @@ -0,0 +1,35 @@ +/// @ref core +/// @file glm/ext/vector_double4_precision.hpp + +#pragma once +#include "../detail/setup.hpp" +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 4 components vector of high double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, double, highp> highp_dvec4; + + /// 4 components vector of medium double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, double, mediump> mediump_dvec4; + + /// 4 components vector of low double-qualifier floating-point numbers. 
+ /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, double, lowp> lowp_dvec4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float1.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float1.hpp new file mode 100644 index 000000000000..28acc2c9ca59 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float1.hpp @@ -0,0 +1,31 @@ +/// @ref ext_vector_float1 +/// @file glm/ext/vector_float1.hpp +/// +/// @defgroup ext_vector_float1 GLM_EXT_vector_float1 +/// @ingroup ext +/// +/// Exposes single-precision floating point vector type with one component. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_float1_precision extension. +/// @see ext_vector_double1 extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_float1 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_float1 + /// @{ + + /// 1 components vector of single-precision floating-point numbers. + typedef vec<1, float, defaultp> vec1; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float1_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float1_precision.hpp new file mode 100644 index 000000000000..6e8dad8d17c8 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float1_precision.hpp @@ -0,0 +1,36 @@ +/// @ref ext_vector_float1_precision +/// @file glm/ext/vector_float1_precision.hpp +/// +/// @defgroup ext_vector_float1_precision GLM_EXT_vector_float1_precision +/// @ingroup ext +/// +/// Exposes highp_vec1, mediump_vec1 and lowp_vec1 types. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_float1 extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_float1_precision extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_float1_precision + /// @{ + + /// 1 component vector of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<1, float, highp> highp_vec1; + + /// 1 component vector of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, float, mediump> mediump_vec1; + + /// 1 component vector of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, float, lowp> lowp_vec1; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float2.hpp new file mode 100644 index 000000000000..d31545dcc966 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_float2.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 2 components vector of single-precision floating-point numbers. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<2, float, defaultp> vec2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float2_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float2_precision.hpp new file mode 100644 index 000000000000..23c0820d0ae8 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_float2_precision.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 2 components vector of high single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, float, highp> highp_vec2; + + /// 2 components vector of medium single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, float, mediump> mediump_vec2; + + /// 2 components vector of low single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, float, lowp> lowp_vec2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float3.hpp new file mode 100644 index 000000000000..cd79a62004e4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_float3.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 3 components vector of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<3, float, defaultp> vec3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float3_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float3_precision.hpp new file mode 100644 index 000000000000..be640b531683 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float3_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_float3_precision.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 3 components vector of high single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, float, highp> highp_vec3; + + /// 3 components vector of medium single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, float, mediump> mediump_vec3; + + /// 3 components vector of low single-qualifier floating-point numbers. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, float, lowp> lowp_vec3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float4.hpp new file mode 100644 index 000000000000..d84adcc22fd2 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_float4.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 4 components vector of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<4, float, defaultp> vec4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float4_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float4_precision.hpp new file mode 100644 index 000000000000..aede83882e55 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_float4_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_float4_precision.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 4 components vector of high single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, float, highp> highp_vec4; + + /// 4 components vector of medium single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, float, mediump> mediump_vec4; + + /// 4 components vector of low single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, float, lowp> lowp_vec4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int1.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int1.hpp new file mode 100644 index 000000000000..dc8603891a99 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int1.hpp @@ -0,0 +1,32 @@ +/// @ref ext_vector_int1 +/// @file glm/ext/vector_int1.hpp +/// +/// @defgroup ext_vector_int1 GLM_EXT_vector_int1 +/// @ingroup ext +/// +/// Exposes ivec1 vector type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_uint1 extension. +/// @see ext_vector_int1_precision extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_int1 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_int1 + /// @{ + + /// 1 component vector of signed integer numbers. 
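+    /// Editorial usage sketch (not part of upstream GLM):
+    /// @code
+    /// glm::ivec1 i(7);
+    /// int v = i.x; // the single component is accessed as .x
+    /// @endcode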
+ typedef vec<1, int, defaultp> ivec1; + + /// @} +}//namespace glm + diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int1_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int1_sized.hpp new file mode 100644 index 000000000000..de0d4cf82e63 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int1_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_int1_sized +/// @file glm/ext/vector_int1_sized.hpp +/// +/// @defgroup ext_vector_int1_sized GLM_EXT_vector_int1_sized +/// @ingroup ext +/// +/// Exposes sized signed integer vector types. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_int_sized +/// @see ext_vector_uint1_sized + +#pragma once + +#include "../ext/vector_int1.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_int1_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_int1_sized + /// @{ + + /// 8 bit signed integer vector of 1 component type. + /// + /// @see ext_vector_int1_sized + typedef vec<1, int8, defaultp> i8vec1; + + /// 16 bit signed integer vector of 1 component type. + /// + /// @see ext_vector_int1_sized + typedef vec<1, int16, defaultp> i16vec1; + + /// 32 bit signed integer vector of 1 component type. + /// + /// @see ext_vector_int1_sized + typedef vec<1, int32, defaultp> i32vec1; + + /// 64 bit signed integer vector of 1 component type. + /// + /// @see ext_vector_int1_sized + typedef vec<1, int64, defaultp> i64vec1; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int2.hpp new file mode 100644 index 000000000000..aef803e91b73 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_int2.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 2 components vector of signed integer numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<2, int, defaultp> ivec2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int2_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int2_sized.hpp new file mode 100644 index 000000000000..1fd57eef310d --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_int2_sized +/// @file glm/ext/vector_int2_sized.hpp +/// +/// @defgroup ext_vector_int2_sized GLM_EXT_vector_int2_sized +/// @ingroup ext +/// +/// Exposes sized signed integer vector of 2 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_int_sized +/// @see ext_vector_uint2_sized + +#pragma once + +#include "../ext/vector_int2.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_int2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_int2_sized + /// @{ + + /// 8 bit signed integer vector of 2 components type. + /// + /// @see ext_vector_int2_sized + typedef vec<2, int8, defaultp> i8vec2; + + /// 16 bit signed integer vector of 2 components type. + /// + /// @see ext_vector_int2_sized + typedef vec<2, int16, defaultp> i16vec2; + + /// 32 bit signed integer vector of 2 components type. 
+ /// + /// @see ext_vector_int2_sized + typedef vec<2, int32, defaultp> i32vec2; + + /// 64 bit signed integer vector of 2 components type. + /// + /// @see ext_vector_int2_sized + typedef vec<2, int64, defaultp> i64vec2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int3.hpp new file mode 100644 index 000000000000..4767e61e88c0 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_int3.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 3 components vector of signed integer numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<3, int, defaultp> ivec3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int3_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int3_sized.hpp new file mode 100644 index 000000000000..085a3febbffd --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_int3_sized +/// @file glm/ext/vector_int3_sized.hpp +/// +/// @defgroup ext_vector_int3_sized GLM_EXT_vector_int3_sized +/// @ingroup ext +/// +/// Exposes sized signed integer vector of 3 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_int_sized +/// @see ext_vector_uint3_sized + +#pragma once + +#include "../ext/vector_int3.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_int3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_int3_sized + /// @{ + + /// 8 bit signed integer vector of 3 components type. + /// + /// @see ext_vector_int3_sized + typedef vec<3, int8, defaultp> i8vec3; + + /// 16 bit signed integer vector of 3 components type. + /// + /// @see ext_vector_int3_sized + typedef vec<3, int16, defaultp> i16vec3; + + /// 32 bit signed integer vector of 3 components type. + /// + /// @see ext_vector_int3_sized + typedef vec<3, int32, defaultp> i32vec3; + + /// 64 bit signed integer vector of 3 components type. + /// + /// @see ext_vector_int3_sized + typedef vec<3, int64, defaultp> i64vec3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int4.hpp new file mode 100644 index 000000000000..bb23adf706c2 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_int4.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 4 components vector of signed integer numbers. 
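The sized signed integer typedefs (i8vec1 through i64vec4) are convenient for compact, GPU-facing data layouts. A short sketch under that assumption; the struct and its field names are hypothetical, not from this patch:

    #include <glm/ext/vector_int2_sized.hpp>
    #include <glm/ext/vector_int4_sized.hpp>

    // Hypothetical packed vertex attribute block built from the sized typedefs.
    struct PackedVertex {
        glm::i8vec4  normal; // 4 x int8  = 4 bytes
        glm::i16vec2 uv;     // 2 x int16 = 4 bytes
    };
    static_assert(sizeof(glm::i8vec4) == 4, "i8vec4 is tightly packed");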
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<4, int, defaultp> ivec4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int4_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int4_sized.hpp new file mode 100644 index 000000000000..c63d46540b3e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_int4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_int4_sized +/// @file glm/ext/vector_int4_sized.hpp +/// +/// @defgroup ext_vector_int4_sized GLM_EXT_vector_int4_sized +/// @ingroup ext +/// +/// Exposes sized signed integer vector of 4 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_int_sized +/// @see ext_vector_uint4_sized + +#pragma once + +#include "../ext/vector_int4.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_int4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_int4_sized + /// @{ + + /// 8 bit signed integer vector of 4 components type. + /// + /// @see ext_vector_int4_sized + typedef vec<4, int8, defaultp> i8vec4; + + /// 16 bit signed integer vector of 4 components type. + /// + /// @see ext_vector_int4_sized + typedef vec<4, int16, defaultp> i16vec4; + + /// 32 bit signed integer vector of 4 components type. + /// + /// @see ext_vector_int4_sized + typedef vec<4, int32, defaultp> i32vec4; + + /// 64 bit signed integer vector of 4 components type. + /// + /// @see ext_vector_int4_sized + typedef vec<4, int64, defaultp> i64vec4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_integer.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_integer.hpp new file mode 100644 index 000000000000..1304dd8d660f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_integer.hpp @@ -0,0 +1,149 @@ +/// @ref ext_vector_integer +/// @file glm/ext/vector_integer.hpp +/// +/// @see core (dependence) +/// @see ext_vector_integer (dependence) +/// +/// @defgroup ext_vector_integer GLM_EXT_vector_integer +/// @ingroup ext +/// +/// Include to use the features of this extension. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/_vectorize.hpp" +#include "../vector_relational.hpp" +#include "../common.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_integer extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_integer + /// @{ + + /// Return true if the value is a power of two number. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec isPowerOfTwo(vec const& v); + + /// Return the power of two number which value is just higher the input value, + /// round up to a power of two. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec nextPowerOfTwo(vec const& v); + + /// Return the power of two number which value is just lower the input value, + /// round down to a power of two. 
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed or unsigned integer scalar types.
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see ext_vector_integer
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> prevPowerOfTwo(vec<L, T, Q> const& v);
+
+	/// Return true if the 'Value' is a multiple of 'Multiple'.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed or unsigned integer scalar types.
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see ext_vector_integer
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, bool, Q> isMultiple(vec<L, T, Q> const& v, T Multiple);
+
+	/// Return true if the 'Value' is a multiple of 'Multiple'.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed or unsigned integer scalar types.
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see ext_vector_integer
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, bool, Q> isMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
+
+	/// Higher multiple number of Source.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed or unsigned integer scalar types.
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @param v Source values to which is applied the function
+	/// @param Multiple Must be a null or positive value
+	///
+	/// @see ext_vector_integer
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> nextMultiple(vec<L, T, Q> const& v, T Multiple);
+
+	/// Higher multiple number of Source.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed or unsigned integer scalar types.
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @param v Source values to which is applied the function
+	/// @param Multiple Must be a null or positive value
+	///
+	/// @see ext_vector_integer
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> nextMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
+
+	/// Lower multiple number of Source.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed or unsigned integer scalar types.
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @param v Source values to which is applied the function
+	/// @param Multiple Must be a null or positive value
+	///
+	/// @see ext_vector_integer
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> prevMultiple(vec<L, T, Q> const& v, T Multiple);
+
+	/// Lower multiple number of Source.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed or unsigned integer scalar types.
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @param v Source values to which is applied the function
+	/// @param Multiple Must be a null or positive value
+	///
+	/// @see ext_vector_integer
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> prevMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
+
+	/// Returns the bit number of the Nth significant bit set to
+	/// 1 in the binary representation of value.
+	/// If value bitcount is less than the Nth significant bit, -1 will be returned.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Signed or unsigned integer scalar types.
+	///
+	/// @see ext_vector_integer
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, int, Q> findNSB(vec<L, T, Q> const& Source, vec<L, int, Q> SignificantBitCount);
+
+	/// @}
+} //namespace glm
+
+#include "vector_integer.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_integer.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_integer.inl
new file mode 100644
index 000000000000..cefb132e237a
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_integer.inl
@@ -0,0 +1,85 @@
+#include "scalar_integer.hpp"
+
+namespace glm
+{
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, bool, Q> isPowerOfTwo(vec<L, T, Q> const& Value)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'isPowerOfTwo' only accept integer inputs");
+
+		vec<L, T, Q> const Result(abs(Value));
+		return equal(Result & (Result - vec<L, T, Q>(1)), vec<L, T, Q>(0));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> nextPowerOfTwo(vec<L, T, Q> const& v)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'nextPowerOfTwo' only accept integer inputs");
+
+		return detail::compute_ceilPowerOfTwo<L, T, Q, std::numeric_limits<T>::is_signed>::call(v);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> prevPowerOfTwo(vec<L, T, Q> const& v)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'prevPowerOfTwo' only accept integer inputs");
+
+		return detail::functor1<vec, L, T, T, Q>::call(prevPowerOfTwo, v);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, bool, Q> isMultiple(vec<L, T, Q> const& Value, T Multiple)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'isMultiple' only accept integer inputs");
+
+		return equal(Value % Multiple, vec<L, T, Q>(0));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, bool, Q> isMultiple(vec<L, T, Q> const& Value, vec<L, T, Q> const& Multiple)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'isMultiple' only accept integer inputs");
+
+		return equal(Value % Multiple, vec<L, T, Q>(0));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> nextMultiple(vec<L, T, Q> const& Source, T Multiple)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'nextMultiple' only accept integer inputs");
+
+		return detail::functor2<vec, L, T, Q>::call(nextMultiple, Source, vec<L, T, Q>(Multiple));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> nextMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'nextMultiple' only accept integer inputs");
+
+		return detail::functor2<vec, L, T, Q>::call(nextMultiple, Source, Multiple);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> prevMultiple(vec<L, T, Q> const& Source, T Multiple)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'prevMultiple' only accept integer inputs");
+
+		return detail::functor2<vec, L, T, Q>::call(prevMultiple, Source, vec<L, T, Q>(Multiple));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> prevMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'prevMultiple' only accept integer inputs");
+
+		return detail::functor2<vec, L, T, Q>::call(prevMultiple, Source, Multiple);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, int, Q> findNSB(vec<L, T, Q> const& Source, vec<L, int, Q> SignificantBitCount)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findNSB' only accept integer inputs");
+
+		return detail::functor2_vec_int<L, T, Q>::call(findNSB, Source, SignificantBitCount);
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_packing.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_packing.hpp
new file mode 100644
index 000000000000..76e5d0cc6c2f
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_packing.hpp
@@ -0,0 +1,32 @@
+/// @ref ext_vector_packing
+/// @file glm/ext/vector_packing.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup ext_vector_packing GLM_EXT_vector_packing
+/// @ingroup ext
+///
+/// Include <glm/ext/vector_packing.hpp>
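A usage sketch of the GLM_EXT_vector_integer functions defined just above; the texture-extent scenario is hypothetical, and the expected values in the comments follow directly from the definitions:

    #include <glm/glm.hpp>
    #include <glm/ext/vector_integer.hpp>

    // Round a hypothetical texture extent up to power-of-two and alignment boundaries.
    glm::uvec2 extent(640u, 481u);
    glm::bvec2 pot   = glm::isPowerOfTwo(extent);       // (false, false)
    glm::uvec2 up    = glm::nextPowerOfTwo(extent);     // (1024u, 512u)
    glm::uvec2 pitch = glm::nextMultiple(extent, 256u); // (768u, 512u)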
to use the features of this extension. +/// +/// This extension provides a set of function to convert vectors to packed +/// formats. + +#pragma once + +// Dependency: +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_packing extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_packing + /// @{ + + + /// @} +}// namespace glm + +#include "vector_packing.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_packing.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_packing.inl new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_reciprocal.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_reciprocal.hpp new file mode 100644 index 000000000000..84d67662f26e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_reciprocal.hpp @@ -0,0 +1,135 @@ +/// @ref ext_vector_reciprocal +/// @file glm/ext/vector_reciprocal.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_vector_reciprocal GLM_EXT_vector_reciprocal +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Define secant, cosecant and cotangent functions. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_reciprocal extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_reciprocal + /// @{ + + /// Secant function. + /// hypotenuse / adjacent or 1 / cos(x) + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType sec(genType angle); + + /// Cosecant function. + /// hypotenuse / opposite or 1 / sin(x) + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType csc(genType angle); + + /// Cotangent function. + /// adjacent / opposite or 1 / tan(x) + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType cot(genType angle); + + /// Inverse secant function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType asec(genType x); + + /// Inverse cosecant function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType acsc(genType x); + + /// Inverse cotangent function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType acot(genType x); + + /// Secant hyperbolic function. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType sech(genType angle); + + /// Cosecant hyperbolic function. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType csch(genType angle); + + /// Cotangent hyperbolic function. + /// + /// @tparam genType Floating-point scalar or vector types. 
+ /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType coth(genType angle); + + /// Inverse secant hyperbolic function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType asech(genType x); + + /// Inverse cosecant hyperbolic function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType acsch(genType x); + + /// Inverse cotangent hyperbolic function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType acoth(genType x); + + /// @} +}//namespace glm + +#include "vector_reciprocal.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_reciprocal.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_reciprocal.inl new file mode 100644 index 000000000000..b85102a2f578 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_reciprocal.inl @@ -0,0 +1,105 @@ +/// @ref ext_vector_reciprocal + +#include "../trigonometric.hpp" +#include + +namespace glm +{ + // sec + template + GLM_FUNC_QUALIFIER vec sec(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'sec' only accept floating-point inputs"); + return static_cast(1) / detail::functor1::call(cos, x); + } + + // csc + template + GLM_FUNC_QUALIFIER vec csc(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'csc' only accept floating-point inputs"); + return static_cast(1) / detail::functor1::call(sin, x); + } + + // cot + template + GLM_FUNC_QUALIFIER vec cot(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'cot' only accept floating-point inputs"); + T const pi_over_2 = static_cast(3.1415926535897932384626433832795 / 2.0); + return detail::functor1::call(tan, pi_over_2 - x); + } + + // asec + template + GLM_FUNC_QUALIFIER vec asec(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'asec' only accept floating-point inputs"); + return detail::functor1::call(acos, static_cast(1) / x); + } + + // acsc + template + GLM_FUNC_QUALIFIER vec acsc(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acsc' only accept floating-point inputs"); + return detail::functor1::call(asin, static_cast(1) / x); + } + + // acot + template + GLM_FUNC_QUALIFIER vec acot(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acot' only accept floating-point inputs"); + T const pi_over_2 = static_cast(3.1415926535897932384626433832795 / 2.0); + return pi_over_2 - detail::functor1::call(atan, x); + } + + // sech + template + GLM_FUNC_QUALIFIER vec sech(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'sech' only accept floating-point inputs"); + return static_cast(1) / detail::functor1::call(cosh, x); + } + + // csch + template + GLM_FUNC_QUALIFIER vec csch(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'csch' only accept floating-point inputs"); + return static_cast(1) / 
detail::functor1::call(sinh, x); + } + + // coth + template + GLM_FUNC_QUALIFIER vec coth(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'coth' only accept floating-point inputs"); + return glm::cosh(x) / glm::sinh(x); + } + + // asech + template + GLM_FUNC_QUALIFIER vec asech(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'asech' only accept floating-point inputs"); + return detail::functor1::call(acosh, static_cast(1) / x); + } + + // acsch + template + GLM_FUNC_QUALIFIER vec acsch(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acsch' only accept floating-point inputs"); + return detail::functor1::call(asinh, static_cast(1) / x); + } + + // acoth + template + GLM_FUNC_QUALIFIER vec acoth(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acoth' only accept floating-point inputs"); + return detail::functor1::call(atanh, static_cast(1) / x); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_relational.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_relational.hpp new file mode 100644 index 000000000000..1c2367dc0231 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_relational.hpp @@ -0,0 +1,107 @@ +/// @ref ext_vector_relational +/// @file glm/ext/vector_relational.hpp +/// +/// @see core (dependence) +/// @see ext_scalar_integer (dependence) +/// +/// @defgroup ext_vector_relational GLM_EXT_vector_relational +/// @ingroup ext +/// +/// Exposes comparison functions for vector types that take a user defined epsilon values. +/// +/// Include to use the features of this extension. +/// +/// @see core_vector_relational +/// @see ext_scalar_relational +/// @see ext_matrix_relational + +#pragma once + +// Dependencies +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_relational extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_relational + /// @{ + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, T epsilon); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& epsilon); + + /// Returns the component-wise comparison of |x - y| >= epsilon. + /// True if this expression is not satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, T epsilon); + + /// Returns the component-wise comparison of |x - y| >= epsilon. + /// True if this expression is not satisfied. 
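To make the reciprocal trigonometric extension above concrete, a minimal sketch (assuming the vendored glm/ tree is on the include path; the epsilon check uses the equal() overload from GLM_EXT_vector_relational, declared below):

    #include <glm/glm.hpp>
    #include <glm/ext/vector_reciprocal.hpp>
    #include <glm/ext/vector_relational.hpp>

    glm::vec2 angle(0.5f, 1.0f);
    glm::vec2 s = glm::sec(angle); // 1 / cos(angle), component-wise
    glm::vec2 t = glm::cot(angle); // tan(pi/2 - angle), per the implementation above
    // Sanity check: sec(x) * cos(x) == 1 within a small tolerance.
    bool ok = glm::all(glm::equal(s * glm::cos(angle), glm::vec2(1.0f), 1e-4f));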
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& epsilon); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, int ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is not satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, int ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is not satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& ULPs); + + /// @} +}//namespace glm + +#include "vector_relational.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_relational.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_relational.inl new file mode 100644 index 000000000000..7a39ab50897e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_relational.inl @@ -0,0 +1,75 @@ +#include "../vector_relational.hpp" +#include "../common.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/type_float.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, T Epsilon) + { + return equal(x, y, vec(Epsilon)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& Epsilon) + { + return lessThanEqual(abs(x - y), Epsilon); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, T Epsilon) + { + return notEqual(x, y, vec(Epsilon)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& Epsilon) + { + return greaterThan(abs(x - y), Epsilon); + } + + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, int MaxULPs) + { + return equal(x, y, vec(MaxULPs)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& MaxULPs) + { + vec Result(false); + for(length_t i = 0; i < L; ++i) + { + detail::float_t const a(x[i]); + detail::float_t const b(y[i]); + + // Different signs means they do not match. 
+ if(a.negative() != b.negative()) + { + // Check for equality to make sure +0==-0 + Result[i] = a.mantissa() == b.mantissa() && a.exponent() == b.exponent(); + } + else + { + // Find the difference in ULPs. + typename detail::float_t::int_type const DiffULPs = abs(a.i - b.i); + Result[i] = DiffULPs <= MaxULPs[i]; + } + } + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, int MaxULPs) + { + return notEqual(x, y, vec(MaxULPs)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& MaxULPs) + { + return not_(equal(x, y, MaxULPs)); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint1.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint1.hpp new file mode 100644 index 000000000000..eb8a7049761f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint1.hpp @@ -0,0 +1,32 @@ +/// @ref ext_vector_uint1 +/// @file glm/ext/vector_uint1.hpp +/// +/// @defgroup ext_vector_uint1 GLM_EXT_vector_uint1 +/// @ingroup ext +/// +/// Exposes uvec1 vector type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_int1 extension. +/// @see ext_vector_uint1_precision extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_uint1 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_uint1 + /// @{ + + /// 1 component vector of unsigned integer numbers. + typedef vec<1, unsigned int, defaultp> uvec1; + + /// @} +}//namespace glm + diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint1_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint1_sized.hpp new file mode 100644 index 000000000000..2a938bbaf616 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint1_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_uint1_sized +/// @file glm/ext/vector_uint1_sized.hpp +/// +/// @defgroup ext_vector_uint1_sized GLM_EXT_vector_uint1_sized +/// @ingroup ext +/// +/// Exposes sized unsigned integer vector types. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_uint_sized +/// @see ext_vector_int1_sized + +#pragma once + +#include "../ext/vector_uint1.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_uint1_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_uint1_sized + /// @{ + + /// 8 bit unsigned integer vector of 1 component type. + /// + /// @see ext_vector_uint1_sized + typedef vec<1, uint8, defaultp> u8vec1; + + /// 16 bit unsigned integer vector of 1 component type. + /// + /// @see ext_vector_uint1_sized + typedef vec<1, uint16, defaultp> u16vec1; + + /// 32 bit unsigned integer vector of 1 component type. + /// + /// @see ext_vector_uint1_sized + typedef vec<1, uint32, defaultp> u32vec1; + + /// 64 bit unsigned integer vector of 1 component type. 
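A short sketch of the epsilon- and ULP-based comparisons implemented above (illustrative values; not part of the vendored sources):

    #include <glm/glm.hpp>
    #include <glm/ext/vector_relational.hpp>

    glm::vec3 a(0.1f);
    glm::vec3 b = a + glm::vec3(1e-7f);
    bool nearAbs = glm::all(glm::equal(a, b, 1e-5f)); // absolute-epsilon test
    bool nearUlp = glm::all(glm::equal(a, b, 4));     // within 4 ULPs per component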
+ /// + /// @see ext_vector_uint1_sized + typedef vec<1, uint64, defaultp> u64vec1; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint2.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint2.hpp new file mode 100644 index 000000000000..03c00f5ff584 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_uint2.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 2 components vector of unsigned integer numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<2, unsigned int, defaultp> uvec2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint2_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint2_sized.hpp new file mode 100644 index 000000000000..620fdc6ece39 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_uint2_sized +/// @file glm/ext/vector_uint2_sized.hpp +/// +/// @defgroup ext_vector_uint2_sized GLM_EXT_vector_uint2_sized +/// @ingroup ext +/// +/// Exposes sized unsigned integer vector of 2 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_uint_sized +/// @see ext_vector_int2_sized + +#pragma once + +#include "../ext/vector_uint2.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_uint2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_uint2_sized + /// @{ + + /// 8 bit unsigned integer vector of 2 components type. + /// + /// @see ext_vector_uint2_sized + typedef vec<2, uint8, defaultp> u8vec2; + + /// 16 bit unsigned integer vector of 2 components type. + /// + /// @see ext_vector_uint2_sized + typedef vec<2, uint16, defaultp> u16vec2; + + /// 32 bit unsigned integer vector of 2 components type. + /// + /// @see ext_vector_uint2_sized + typedef vec<2, uint32, defaultp> u32vec2; + + /// 64 bit unsigned integer vector of 2 components type. + /// + /// @see ext_vector_uint2_sized + typedef vec<2, uint64, defaultp> u64vec2; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint3.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint3.hpp new file mode 100644 index 000000000000..f5b41c40882a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_uint3.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 3 components vector of unsigned integer numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<3, unsigned int, defaultp> uvec3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint3_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint3_sized.hpp new file mode 100644 index 000000000000..6f96b98e276f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_uint3_sized +/// @file glm/ext/vector_uint3_sized.hpp +/// +/// @defgroup ext_vector_uint3_sized GLM_EXT_vector_uint3_sized +/// @ingroup ext +/// +/// Exposes sized unsigned integer vector of 3 components type. 
+/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_uint_sized +/// @see ext_vector_int3_sized + +#pragma once + +#include "../ext/vector_uint3.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_uint3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_uint3_sized + /// @{ + + /// 8 bit unsigned integer vector of 3 components type. + /// + /// @see ext_vector_uint3_sized + typedef vec<3, uint8, defaultp> u8vec3; + + /// 16 bit unsigned integer vector of 3 components type. + /// + /// @see ext_vector_uint3_sized + typedef vec<3, uint16, defaultp> u16vec3; + + /// 32 bit unsigned integer vector of 3 components type. + /// + /// @see ext_vector_uint3_sized + typedef vec<3, uint32, defaultp> u32vec3; + + /// 64 bit unsigned integer vector of 3 components type. + /// + /// @see ext_vector_uint3_sized + typedef vec<3, uint64, defaultp> u64vec3; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint4.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint4.hpp new file mode 100644 index 000000000000..32ced58a8f03 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_uint4.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 4 components vector of unsigned integer numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<4, unsigned int, defaultp> uvec4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint4_sized.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint4_sized.hpp new file mode 100644 index 000000000000..da992ea2da86 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_uint4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_uint4_sized +/// @file glm/ext/vector_uint4_sized.hpp +/// +/// @defgroup ext_vector_uint4_sized GLM_EXT_vector_uint4_sized +/// @ingroup ext +/// +/// Exposes sized unsigned integer vector of 4 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_uint_sized +/// @see ext_vector_int4_sized + +#pragma once + +#include "../ext/vector_uint4.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_uint4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_uint4_sized + /// @{ + + /// 8 bit unsigned integer vector of 4 components type. + /// + /// @see ext_vector_uint4_sized + typedef vec<4, uint8, defaultp> u8vec4; + + /// 16 bit unsigned integer vector of 4 components type. + /// + /// @see ext_vector_uint4_sized + typedef vec<4, uint16, defaultp> u16vec4; + + /// 32 bit unsigned integer vector of 4 components type. + /// + /// @see ext_vector_uint4_sized + typedef vec<4, uint32, defaultp> u32vec4; + + /// 64 bit unsigned integer vector of 4 components type. 
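One common use for the sized unsigned vectors is treating u8vec4 as an RGBA8 texel; a hedged sketch (the packing order shown is one convention, not something this patch prescribes):

    #include <cstdint>
    #include <glm/ext/vector_uint4_sized.hpp>

    // Hypothetical RGBA8 texel packed into a 32-bit word, little-endian ABGR order.
    glm::u8vec4 texel(255, 128, 0, 255);
    std::uint32_t rgba = (std::uint32_t(texel.a) << 24)
                       | (std::uint32_t(texel.b) << 16)
                       | (std::uint32_t(texel.g) << 8)
                       |  std::uint32_t(texel.r);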
+ /// + /// @see ext_vector_uint4_sized + typedef vec<4, uint64, defaultp> u64vec4; + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_ulp.hpp b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_ulp.hpp new file mode 100644 index 000000000000..7c539bbf8866 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_ulp.hpp @@ -0,0 +1,112 @@ +/// @ref ext_vector_ulp +/// @file glm/ext/vector_ulp.hpp +/// +/// @defgroup ext_vector_ulp GLM_EXT_vector_ulp +/// @ingroup ext +/// +/// Allow the measurement of the accuracy of a function against a reference +/// implementation. This extension works on floating-point data and provide results +/// in ULP. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_ulp +/// @see ext_scalar_relational +/// @see ext_vector_relational + +#pragma once + +// Dependencies +#include "../ext/scalar_ulp.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_ulp extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_ulp + /// @{ + + /// Return the next ULP value(s) after the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec nextFloat(vec const& x); + + /// Return the value(s) ULP distance after the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec nextFloat(vec const& x, int ULPs); + + /// Return the value(s) ULP distance after the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec nextFloat(vec const& x, vec const& ULPs); + + /// Return the previous ULP value(s) before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec prevFloat(vec const& x); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec prevFloat(vec const& x, int ULPs); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec prevFloat(vec const& x, vec const& ULPs); + + /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. 
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec floatDistance(vec const& x, vec const& y); + + /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec floatDistance(vec const& x, vec const& y); + + /// @} +}//namespace glm + +#include "vector_ulp.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/ext/vector_ulp.inl b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_ulp.inl new file mode 100644 index 000000000000..91565ce51074 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/ext/vector_ulp.inl @@ -0,0 +1,74 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec nextFloat(vec const& x) + { + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = nextFloat(x[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec nextFloat(vec const& x, int ULPs) + { + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = nextFloat(x[i], ULPs); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec nextFloat(vec const& x, vec const& ULPs) + { + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = nextFloat(x[i], ULPs[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec prevFloat(vec const& x) + { + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = prevFloat(x[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec prevFloat(vec const& x, int ULPs) + { + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = prevFloat(x[i], ULPs); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec prevFloat(vec const& x, vec const& ULPs) + { + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = prevFloat(x[i], ULPs[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec floatDistance(vec const& x, vec const& y) + { + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = floatDistance(x[i], y[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec floatDistance(vec const& x, vec const& y) + { + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = floatDistance(x[i], y[i]); + return Result; + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/fwd.hpp b/thirdparty/manifold/thirdparty/glm/glm/fwd.hpp new file mode 100644 index 000000000000..9c2e5eafa4ea --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/fwd.hpp @@ -0,0 +1,1233 @@ +#pragma once + +#include "detail/qualifier.hpp" + +namespace glm +{ +#if GLM_HAS_EXTENDED_INTEGER_TYPE + typedef std::int8_t int8; + typedef std::int16_t int16; + typedef std::int32_t int32; + typedef std::int64_t int64; + + typedef std::uint8_t uint8; + typedef std::uint16_t uint16; + typedef std::uint32_t uint32; + typedef std::uint64_t uint64; +#else + typedef signed char int8; + typedef signed short int16; + typedef signed int int32; + typedef detail::int64 int64; + + typedef unsigned char uint8; + typedef unsigned short uint16; + typedef unsigned int uint32; + typedef detail::uint64 uint64; +#endif + + // Scalar int + + typedef int8 lowp_i8; + typedef int8 mediump_i8; + typedef int8 highp_i8; + 
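A minimal sketch of GLM_EXT_vector_ulp, which closed just above (assuming the vendored glm/ tree is on the include path):

    #include <glm/glm.hpp>
    #include <glm/ext/vector_ulp.hpp>

    glm::vec2 x(1.0f, 2.0f);
    glm::vec2 up = glm::nextFloat(x);         // next representable value above each component
    glm::vec2 dn = glm::prevFloat(x, 2);      // two ULP steps below each component
    glm::ivec2 d = glm::floatDistance(x, up); // (1, 1)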
typedef int8 i8; + + typedef int8 lowp_int8; + typedef int8 mediump_int8; + typedef int8 highp_int8; + + typedef int8 lowp_int8_t; + typedef int8 mediump_int8_t; + typedef int8 highp_int8_t; + typedef int8 int8_t; + + typedef int16 lowp_i16; + typedef int16 mediump_i16; + typedef int16 highp_i16; + typedef int16 i16; + + typedef int16 lowp_int16; + typedef int16 mediump_int16; + typedef int16 highp_int16; + + typedef int16 lowp_int16_t; + typedef int16 mediump_int16_t; + typedef int16 highp_int16_t; + typedef int16 int16_t; + + typedef int32 lowp_i32; + typedef int32 mediump_i32; + typedef int32 highp_i32; + typedef int32 i32; + + typedef int32 lowp_int32; + typedef int32 mediump_int32; + typedef int32 highp_int32; + + typedef int32 lowp_int32_t; + typedef int32 mediump_int32_t; + typedef int32 highp_int32_t; + typedef int32 int32_t; + + typedef int64 lowp_i64; + typedef int64 mediump_i64; + typedef int64 highp_i64; + typedef int64 i64; + + typedef int64 lowp_int64; + typedef int64 mediump_int64; + typedef int64 highp_int64; + + typedef int64 lowp_int64_t; + typedef int64 mediump_int64_t; + typedef int64 highp_int64_t; + typedef int64 int64_t; + + // Scalar uint + + typedef unsigned int uint; + + typedef uint8 lowp_u8; + typedef uint8 mediump_u8; + typedef uint8 highp_u8; + typedef uint8 u8; + + typedef uint8 lowp_uint8; + typedef uint8 mediump_uint8; + typedef uint8 highp_uint8; + + typedef uint8 lowp_uint8_t; + typedef uint8 mediump_uint8_t; + typedef uint8 highp_uint8_t; + typedef uint8 uint8_t; + + typedef uint16 lowp_u16; + typedef uint16 mediump_u16; + typedef uint16 highp_u16; + typedef uint16 u16; + + typedef uint16 lowp_uint16; + typedef uint16 mediump_uint16; + typedef uint16 highp_uint16; + + typedef uint16 lowp_uint16_t; + typedef uint16 mediump_uint16_t; + typedef uint16 highp_uint16_t; + typedef uint16 uint16_t; + + typedef uint32 lowp_u32; + typedef uint32 mediump_u32; + typedef uint32 highp_u32; + typedef uint32 u32; + + typedef uint32 lowp_uint32; + typedef uint32 mediump_uint32; + typedef uint32 highp_uint32; + + typedef uint32 lowp_uint32_t; + typedef uint32 mediump_uint32_t; + typedef uint32 highp_uint32_t; + typedef uint32 uint32_t; + + typedef uint64 lowp_u64; + typedef uint64 mediump_u64; + typedef uint64 highp_u64; + typedef uint64 u64; + + typedef uint64 lowp_uint64; + typedef uint64 mediump_uint64; + typedef uint64 highp_uint64; + + typedef uint64 lowp_uint64_t; + typedef uint64 mediump_uint64_t; + typedef uint64 highp_uint64_t; + typedef uint64 uint64_t; + + // Scalar float + + typedef float lowp_f32; + typedef float mediump_f32; + typedef float highp_f32; + typedef float f32; + + typedef float lowp_float32; + typedef float mediump_float32; + typedef float highp_float32; + typedef float float32; + + typedef float lowp_float32_t; + typedef float mediump_float32_t; + typedef float highp_float32_t; + typedef float float32_t; + + + typedef double lowp_f64; + typedef double mediump_f64; + typedef double highp_f64; + typedef double f64; + + typedef double lowp_float64; + typedef double mediump_float64; + typedef double highp_float64; + typedef double float64; + + typedef double lowp_float64_t; + typedef double mediump_float64_t; + typedef double highp_float64_t; + typedef double float64_t; + + // Vector bool + + typedef vec<1, bool, lowp> lowp_bvec1; + typedef vec<2, bool, lowp> lowp_bvec2; + typedef vec<3, bool, lowp> lowp_bvec3; + typedef vec<4, bool, lowp> lowp_bvec4; + + typedef vec<1, bool, mediump> mediump_bvec1; + typedef vec<2, bool, mediump> mediump_bvec2; + 
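The fixed-width scalar aliases above are useful anywhere exact sizes matter, e.g. serialization; a small sketch under that assumption:

    #include <glm/fwd.hpp>

    glm::int16   s = -12345;
    glm::uint8   b = 0xFFu;
    glm::float32 f = 1.5f;
    static_assert(sizeof(glm::int16) == 2 && sizeof(glm::uint8) == 1, "sized as named");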
typedef vec<3, bool, mediump> mediump_bvec3; + typedef vec<4, bool, mediump> mediump_bvec4; + + typedef vec<1, bool, highp> highp_bvec1; + typedef vec<2, bool, highp> highp_bvec2; + typedef vec<3, bool, highp> highp_bvec3; + typedef vec<4, bool, highp> highp_bvec4; + + typedef vec<1, bool, defaultp> bvec1; + typedef vec<2, bool, defaultp> bvec2; + typedef vec<3, bool, defaultp> bvec3; + typedef vec<4, bool, defaultp> bvec4; + + // Vector int + + typedef vec<1, int, lowp> lowp_ivec1; + typedef vec<2, int, lowp> lowp_ivec2; + typedef vec<3, int, lowp> lowp_ivec3; + typedef vec<4, int, lowp> lowp_ivec4; + + typedef vec<1, int, mediump> mediump_ivec1; + typedef vec<2, int, mediump> mediump_ivec2; + typedef vec<3, int, mediump> mediump_ivec3; + typedef vec<4, int, mediump> mediump_ivec4; + + typedef vec<1, int, highp> highp_ivec1; + typedef vec<2, int, highp> highp_ivec2; + typedef vec<3, int, highp> highp_ivec3; + typedef vec<4, int, highp> highp_ivec4; + + typedef vec<1, int, defaultp> ivec1; + typedef vec<2, int, defaultp> ivec2; + typedef vec<3, int, defaultp> ivec3; + typedef vec<4, int, defaultp> ivec4; + + typedef vec<1, i8, lowp> lowp_i8vec1; + typedef vec<2, i8, lowp> lowp_i8vec2; + typedef vec<3, i8, lowp> lowp_i8vec3; + typedef vec<4, i8, lowp> lowp_i8vec4; + + typedef vec<1, i8, mediump> mediump_i8vec1; + typedef vec<2, i8, mediump> mediump_i8vec2; + typedef vec<3, i8, mediump> mediump_i8vec3; + typedef vec<4, i8, mediump> mediump_i8vec4; + + typedef vec<1, i8, highp> highp_i8vec1; + typedef vec<2, i8, highp> highp_i8vec2; + typedef vec<3, i8, highp> highp_i8vec3; + typedef vec<4, i8, highp> highp_i8vec4; + + typedef vec<1, i8, defaultp> i8vec1; + typedef vec<2, i8, defaultp> i8vec2; + typedef vec<3, i8, defaultp> i8vec3; + typedef vec<4, i8, defaultp> i8vec4; + + typedef vec<1, i16, lowp> lowp_i16vec1; + typedef vec<2, i16, lowp> lowp_i16vec2; + typedef vec<3, i16, lowp> lowp_i16vec3; + typedef vec<4, i16, lowp> lowp_i16vec4; + + typedef vec<1, i16, mediump> mediump_i16vec1; + typedef vec<2, i16, mediump> mediump_i16vec2; + typedef vec<3, i16, mediump> mediump_i16vec3; + typedef vec<4, i16, mediump> mediump_i16vec4; + + typedef vec<1, i16, highp> highp_i16vec1; + typedef vec<2, i16, highp> highp_i16vec2; + typedef vec<3, i16, highp> highp_i16vec3; + typedef vec<4, i16, highp> highp_i16vec4; + + typedef vec<1, i16, defaultp> i16vec1; + typedef vec<2, i16, defaultp> i16vec2; + typedef vec<3, i16, defaultp> i16vec3; + typedef vec<4, i16, defaultp> i16vec4; + + typedef vec<1, i32, lowp> lowp_i32vec1; + typedef vec<2, i32, lowp> lowp_i32vec2; + typedef vec<3, i32, lowp> lowp_i32vec3; + typedef vec<4, i32, lowp> lowp_i32vec4; + + typedef vec<1, i32, mediump> mediump_i32vec1; + typedef vec<2, i32, mediump> mediump_i32vec2; + typedef vec<3, i32, mediump> mediump_i32vec3; + typedef vec<4, i32, mediump> mediump_i32vec4; + + typedef vec<1, i32, highp> highp_i32vec1; + typedef vec<2, i32, highp> highp_i32vec2; + typedef vec<3, i32, highp> highp_i32vec3; + typedef vec<4, i32, highp> highp_i32vec4; + + typedef vec<1, i32, defaultp> i32vec1; + typedef vec<2, i32, defaultp> i32vec2; + typedef vec<3, i32, defaultp> i32vec3; + typedef vec<4, i32, defaultp> i32vec4; + + typedef vec<1, i64, lowp> lowp_i64vec1; + typedef vec<2, i64, lowp> lowp_i64vec2; + typedef vec<3, i64, lowp> lowp_i64vec3; + typedef vec<4, i64, lowp> lowp_i64vec4; + + typedef vec<1, i64, mediump> mediump_i64vec1; + typedef vec<2, i64, mediump> mediump_i64vec2; + typedef vec<3, i64, mediump> mediump_i64vec3; + typedef vec<4, i64, 
mediump> mediump_i64vec4; + + typedef vec<1, i64, highp> highp_i64vec1; + typedef vec<2, i64, highp> highp_i64vec2; + typedef vec<3, i64, highp> highp_i64vec3; + typedef vec<4, i64, highp> highp_i64vec4; + + typedef vec<1, i64, defaultp> i64vec1; + typedef vec<2, i64, defaultp> i64vec2; + typedef vec<3, i64, defaultp> i64vec3; + typedef vec<4, i64, defaultp> i64vec4; + + // Vector uint + + typedef vec<1, uint, lowp> lowp_uvec1; + typedef vec<2, uint, lowp> lowp_uvec2; + typedef vec<3, uint, lowp> lowp_uvec3; + typedef vec<4, uint, lowp> lowp_uvec4; + + typedef vec<1, uint, mediump> mediump_uvec1; + typedef vec<2, uint, mediump> mediump_uvec2; + typedef vec<3, uint, mediump> mediump_uvec3; + typedef vec<4, uint, mediump> mediump_uvec4; + + typedef vec<1, uint, highp> highp_uvec1; + typedef vec<2, uint, highp> highp_uvec2; + typedef vec<3, uint, highp> highp_uvec3; + typedef vec<4, uint, highp> highp_uvec4; + + typedef vec<1, uint, defaultp> uvec1; + typedef vec<2, uint, defaultp> uvec2; + typedef vec<3, uint, defaultp> uvec3; + typedef vec<4, uint, defaultp> uvec4; + + typedef vec<1, u8, lowp> lowp_u8vec1; + typedef vec<2, u8, lowp> lowp_u8vec2; + typedef vec<3, u8, lowp> lowp_u8vec3; + typedef vec<4, u8, lowp> lowp_u8vec4; + + typedef vec<1, u8, mediump> mediump_u8vec1; + typedef vec<2, u8, mediump> mediump_u8vec2; + typedef vec<3, u8, mediump> mediump_u8vec3; + typedef vec<4, u8, mediump> mediump_u8vec4; + + typedef vec<1, u8, highp> highp_u8vec1; + typedef vec<2, u8, highp> highp_u8vec2; + typedef vec<3, u8, highp> highp_u8vec3; + typedef vec<4, u8, highp> highp_u8vec4; + + typedef vec<1, u8, defaultp> u8vec1; + typedef vec<2, u8, defaultp> u8vec2; + typedef vec<3, u8, defaultp> u8vec3; + typedef vec<4, u8, defaultp> u8vec4; + + typedef vec<1, u16, lowp> lowp_u16vec1; + typedef vec<2, u16, lowp> lowp_u16vec2; + typedef vec<3, u16, lowp> lowp_u16vec3; + typedef vec<4, u16, lowp> lowp_u16vec4; + + typedef vec<1, u16, mediump> mediump_u16vec1; + typedef vec<2, u16, mediump> mediump_u16vec2; + typedef vec<3, u16, mediump> mediump_u16vec3; + typedef vec<4, u16, mediump> mediump_u16vec4; + + typedef vec<1, u16, highp> highp_u16vec1; + typedef vec<2, u16, highp> highp_u16vec2; + typedef vec<3, u16, highp> highp_u16vec3; + typedef vec<4, u16, highp> highp_u16vec4; + + typedef vec<1, u16, defaultp> u16vec1; + typedef vec<2, u16, defaultp> u16vec2; + typedef vec<3, u16, defaultp> u16vec3; + typedef vec<4, u16, defaultp> u16vec4; + + typedef vec<1, u32, lowp> lowp_u32vec1; + typedef vec<2, u32, lowp> lowp_u32vec2; + typedef vec<3, u32, lowp> lowp_u32vec3; + typedef vec<4, u32, lowp> lowp_u32vec4; + + typedef vec<1, u32, mediump> mediump_u32vec1; + typedef vec<2, u32, mediump> mediump_u32vec2; + typedef vec<3, u32, mediump> mediump_u32vec3; + typedef vec<4, u32, mediump> mediump_u32vec4; + + typedef vec<1, u32, highp> highp_u32vec1; + typedef vec<2, u32, highp> highp_u32vec2; + typedef vec<3, u32, highp> highp_u32vec3; + typedef vec<4, u32, highp> highp_u32vec4; + + typedef vec<1, u32, defaultp> u32vec1; + typedef vec<2, u32, defaultp> u32vec2; + typedef vec<3, u32, defaultp> u32vec3; + typedef vec<4, u32, defaultp> u32vec4; + + typedef vec<1, u64, lowp> lowp_u64vec1; + typedef vec<2, u64, lowp> lowp_u64vec2; + typedef vec<3, u64, lowp> lowp_u64vec3; + typedef vec<4, u64, lowp> lowp_u64vec4; + + typedef vec<1, u64, mediump> mediump_u64vec1; + typedef vec<2, u64, mediump> mediump_u64vec2; + typedef vec<3, u64, mediump> mediump_u64vec3; + typedef vec<4, u64, mediump> mediump_u64vec4; + + typedef 
vec<1, u64, highp> highp_u64vec1; + typedef vec<2, u64, highp> highp_u64vec2; + typedef vec<3, u64, highp> highp_u64vec3; + typedef vec<4, u64, highp> highp_u64vec4; + + typedef vec<1, u64, defaultp> u64vec1; + typedef vec<2, u64, defaultp> u64vec2; + typedef vec<3, u64, defaultp> u64vec3; + typedef vec<4, u64, defaultp> u64vec4; + + // Vector float + + typedef vec<1, float, lowp> lowp_vec1; + typedef vec<2, float, lowp> lowp_vec2; + typedef vec<3, float, lowp> lowp_vec3; + typedef vec<4, float, lowp> lowp_vec4; + + typedef vec<1, float, mediump> mediump_vec1; + typedef vec<2, float, mediump> mediump_vec2; + typedef vec<3, float, mediump> mediump_vec3; + typedef vec<4, float, mediump> mediump_vec4; + + typedef vec<1, float, highp> highp_vec1; + typedef vec<2, float, highp> highp_vec2; + typedef vec<3, float, highp> highp_vec3; + typedef vec<4, float, highp> highp_vec4; + + typedef vec<1, float, defaultp> vec1; + typedef vec<2, float, defaultp> vec2; + typedef vec<3, float, defaultp> vec3; + typedef vec<4, float, defaultp> vec4; + + typedef vec<1, float, lowp> lowp_fvec1; + typedef vec<2, float, lowp> lowp_fvec2; + typedef vec<3, float, lowp> lowp_fvec3; + typedef vec<4, float, lowp> lowp_fvec4; + + typedef vec<1, float, mediump> mediump_fvec1; + typedef vec<2, float, mediump> mediump_fvec2; + typedef vec<3, float, mediump> mediump_fvec3; + typedef vec<4, float, mediump> mediump_fvec4; + + typedef vec<1, float, highp> highp_fvec1; + typedef vec<2, float, highp> highp_fvec2; + typedef vec<3, float, highp> highp_fvec3; + typedef vec<4, float, highp> highp_fvec4; + + typedef vec<1, f32, defaultp> fvec1; + typedef vec<2, f32, defaultp> fvec2; + typedef vec<3, f32, defaultp> fvec3; + typedef vec<4, f32, defaultp> fvec4; + + typedef vec<1, f32, lowp> lowp_f32vec1; + typedef vec<2, f32, lowp> lowp_f32vec2; + typedef vec<3, f32, lowp> lowp_f32vec3; + typedef vec<4, f32, lowp> lowp_f32vec4; + + typedef vec<1, f32, mediump> mediump_f32vec1; + typedef vec<2, f32, mediump> mediump_f32vec2; + typedef vec<3, f32, mediump> mediump_f32vec3; + typedef vec<4, f32, mediump> mediump_f32vec4; + + typedef vec<1, f32, highp> highp_f32vec1; + typedef vec<2, f32, highp> highp_f32vec2; + typedef vec<3, f32, highp> highp_f32vec3; + typedef vec<4, f32, highp> highp_f32vec4; + + typedef vec<1, f32, defaultp> f32vec1; + typedef vec<2, f32, defaultp> f32vec2; + typedef vec<3, f32, defaultp> f32vec3; + typedef vec<4, f32, defaultp> f32vec4; + + typedef vec<1, f64, lowp> lowp_dvec1; + typedef vec<2, f64, lowp> lowp_dvec2; + typedef vec<3, f64, lowp> lowp_dvec3; + typedef vec<4, f64, lowp> lowp_dvec4; + + typedef vec<1, f64, mediump> mediump_dvec1; + typedef vec<2, f64, mediump> mediump_dvec2; + typedef vec<3, f64, mediump> mediump_dvec3; + typedef vec<4, f64, mediump> mediump_dvec4; + + typedef vec<1, f64, highp> highp_dvec1; + typedef vec<2, f64, highp> highp_dvec2; + typedef vec<3, f64, highp> highp_dvec3; + typedef vec<4, f64, highp> highp_dvec4; + + typedef vec<1, f64, defaultp> dvec1; + typedef vec<2, f64, defaultp> dvec2; + typedef vec<3, f64, defaultp> dvec3; + typedef vec<4, f64, defaultp> dvec4; + + typedef vec<1, f64, lowp> lowp_f64vec1; + typedef vec<2, f64, lowp> lowp_f64vec2; + typedef vec<3, f64, lowp> lowp_f64vec3; + typedef vec<4, f64, lowp> lowp_f64vec4; + + typedef vec<1, f64, mediump> mediump_f64vec1; + typedef vec<2, f64, mediump> mediump_f64vec2; + typedef vec<3, f64, mediump> mediump_f64vec3; + typedef vec<4, f64, mediump> mediump_f64vec4; + + typedef vec<1, f64, highp> highp_f64vec1; + typedef vec<2, 
f64, highp> highp_f64vec2; + typedef vec<3, f64, highp> highp_f64vec3; + typedef vec<4, f64, highp> highp_f64vec4; + + typedef vec<1, f64, defaultp> f64vec1; + typedef vec<2, f64, defaultp> f64vec2; + typedef vec<3, f64, defaultp> f64vec3; + typedef vec<4, f64, defaultp> f64vec4; + + // Matrix NxN + + typedef mat<2, 2, f32, lowp> lowp_mat2; + typedef mat<3, 3, f32, lowp> lowp_mat3; + typedef mat<4, 4, f32, lowp> lowp_mat4; + + typedef mat<2, 2, f32, mediump> mediump_mat2; + typedef mat<3, 3, f32, mediump> mediump_mat3; + typedef mat<4, 4, f32, mediump> mediump_mat4; + + typedef mat<2, 2, f32, highp> highp_mat2; + typedef mat<3, 3, f32, highp> highp_mat3; + typedef mat<4, 4, f32, highp> highp_mat4; + + typedef mat<2, 2, f32, defaultp> mat2; + typedef mat<3, 3, f32, defaultp> mat3; + typedef mat<4, 4, f32, defaultp> mat4; + + typedef mat<2, 2, f32, lowp> lowp_fmat2; + typedef mat<3, 3, f32, lowp> lowp_fmat3; + typedef mat<4, 4, f32, lowp> lowp_fmat4; + + typedef mat<2, 2, f32, mediump> mediump_fmat2; + typedef mat<3, 3, f32, mediump> mediump_fmat3; + typedef mat<4, 4, f32, mediump> mediump_fmat4; + + typedef mat<2, 2, f32, highp> highp_fmat2; + typedef mat<3, 3, f32, highp> highp_fmat3; + typedef mat<4, 4, f32, highp> highp_fmat4; + + typedef mat<2, 2, f32, defaultp> fmat2; + typedef mat<3, 3, f32, defaultp> fmat3; + typedef mat<4, 4, f32, defaultp> fmat4; + + typedef mat<2, 2, f32, lowp> lowp_f32mat2; + typedef mat<3, 3, f32, lowp> lowp_f32mat3; + typedef mat<4, 4, f32, lowp> lowp_f32mat4; + + typedef mat<2, 2, f32, mediump> mediump_f32mat2; + typedef mat<3, 3, f32, mediump> mediump_f32mat3; + typedef mat<4, 4, f32, mediump> mediump_f32mat4; + + typedef mat<2, 2, f32, highp> highp_f32mat2; + typedef mat<3, 3, f32, highp> highp_f32mat3; + typedef mat<4, 4, f32, highp> highp_f32mat4; + + typedef mat<2, 2, f32, defaultp> f32mat2; + typedef mat<3, 3, f32, defaultp> f32mat3; + typedef mat<4, 4, f32, defaultp> f32mat4; + + typedef mat<2, 2, f64, lowp> lowp_dmat2; + typedef mat<3, 3, f64, lowp> lowp_dmat3; + typedef mat<4, 4, f64, lowp> lowp_dmat4; + + typedef mat<2, 2, f64, mediump> mediump_dmat2; + typedef mat<3, 3, f64, mediump> mediump_dmat3; + typedef mat<4, 4, f64, mediump> mediump_dmat4; + + typedef mat<2, 2, f64, highp> highp_dmat2; + typedef mat<3, 3, f64, highp> highp_dmat3; + typedef mat<4, 4, f64, highp> highp_dmat4; + + typedef mat<2, 2, f64, defaultp> dmat2; + typedef mat<3, 3, f64, defaultp> dmat3; + typedef mat<4, 4, f64, defaultp> dmat4; + + typedef mat<2, 2, f64, lowp> lowp_f64mat2; + typedef mat<3, 3, f64, lowp> lowp_f64mat3; + typedef mat<4, 4, f64, lowp> lowp_f64mat4; + + typedef mat<2, 2, f64, mediump> mediump_f64mat2; + typedef mat<3, 3, f64, mediump> mediump_f64mat3; + typedef mat<4, 4, f64, mediump> mediump_f64mat4; + + typedef mat<2, 2, f64, highp> highp_f64mat2; + typedef mat<3, 3, f64, highp> highp_f64mat3; + typedef mat<4, 4, f64, highp> highp_f64mat4; + + typedef mat<2, 2, f64, defaultp> f64mat2; + typedef mat<3, 3, f64, defaultp> f64mat3; + typedef mat<4, 4, f64, defaultp> f64mat4; + + // Matrix MxN + + typedef mat<2, 2, f32, lowp> lowp_mat2x2; + typedef mat<2, 3, f32, lowp> lowp_mat2x3; + typedef mat<2, 4, f32, lowp> lowp_mat2x4; + typedef mat<3, 2, f32, lowp> lowp_mat3x2; + typedef mat<3, 3, f32, lowp> lowp_mat3x3; + typedef mat<3, 4, f32, lowp> lowp_mat3x4; + typedef mat<4, 2, f32, lowp> lowp_mat4x2; + typedef mat<4, 3, f32, lowp> lowp_mat4x3; + typedef mat<4, 4, f32, lowp> lowp_mat4x4; + + typedef mat<2, 2, f32, mediump> mediump_mat2x2; + typedef mat<2, 3, f32, 
mediump> mediump_mat2x3; + typedef mat<2, 4, f32, mediump> mediump_mat2x4; + typedef mat<3, 2, f32, mediump> mediump_mat3x2; + typedef mat<3, 3, f32, mediump> mediump_mat3x3; + typedef mat<3, 4, f32, mediump> mediump_mat3x4; + typedef mat<4, 2, f32, mediump> mediump_mat4x2; + typedef mat<4, 3, f32, mediump> mediump_mat4x3; + typedef mat<4, 4, f32, mediump> mediump_mat4x4; + + typedef mat<2, 2, f32, highp> highp_mat2x2; + typedef mat<2, 3, f32, highp> highp_mat2x3; + typedef mat<2, 4, f32, highp> highp_mat2x4; + typedef mat<3, 2, f32, highp> highp_mat3x2; + typedef mat<3, 3, f32, highp> highp_mat3x3; + typedef mat<3, 4, f32, highp> highp_mat3x4; + typedef mat<4, 2, f32, highp> highp_mat4x2; + typedef mat<4, 3, f32, highp> highp_mat4x3; + typedef mat<4, 4, f32, highp> highp_mat4x4; + + typedef mat<2, 2, f32, defaultp> mat2x2; + typedef mat<2, 3, f32, defaultp> mat2x3; + typedef mat<2, 4, f32, defaultp> mat2x4; + typedef mat<3, 2, f32, defaultp> mat3x2; + typedef mat<3, 3, f32, defaultp> mat3x3; + typedef mat<3, 4, f32, defaultp> mat3x4; + typedef mat<4, 2, f32, defaultp> mat4x2; + typedef mat<4, 3, f32, defaultp> mat4x3; + typedef mat<4, 4, f32, defaultp> mat4x4; + + typedef mat<2, 2, f32, lowp> lowp_fmat2x2; + typedef mat<2, 3, f32, lowp> lowp_fmat2x3; + typedef mat<2, 4, f32, lowp> lowp_fmat2x4; + typedef mat<3, 2, f32, lowp> lowp_fmat3x2; + typedef mat<3, 3, f32, lowp> lowp_fmat3x3; + typedef mat<3, 4, f32, lowp> lowp_fmat3x4; + typedef mat<4, 2, f32, lowp> lowp_fmat4x2; + typedef mat<4, 3, f32, lowp> lowp_fmat4x3; + typedef mat<4, 4, f32, lowp> lowp_fmat4x4; + + typedef mat<2, 2, f32, mediump> mediump_fmat2x2; + typedef mat<2, 3, f32, mediump> mediump_fmat2x3; + typedef mat<2, 4, f32, mediump> mediump_fmat2x4; + typedef mat<3, 2, f32, mediump> mediump_fmat3x2; + typedef mat<3, 3, f32, mediump> mediump_fmat3x3; + typedef mat<3, 4, f32, mediump> mediump_fmat3x4; + typedef mat<4, 2, f32, mediump> mediump_fmat4x2; + typedef mat<4, 3, f32, mediump> mediump_fmat4x3; + typedef mat<4, 4, f32, mediump> mediump_fmat4x4; + + typedef mat<2, 2, f32, highp> highp_fmat2x2; + typedef mat<2, 3, f32, highp> highp_fmat2x3; + typedef mat<2, 4, f32, highp> highp_fmat2x4; + typedef mat<3, 2, f32, highp> highp_fmat3x2; + typedef mat<3, 3, f32, highp> highp_fmat3x3; + typedef mat<3, 4, f32, highp> highp_fmat3x4; + typedef mat<4, 2, f32, highp> highp_fmat4x2; + typedef mat<4, 3, f32, highp> highp_fmat4x3; + typedef mat<4, 4, f32, highp> highp_fmat4x4; + + typedef mat<2, 2, f32, defaultp> fmat2x2; + typedef mat<2, 3, f32, defaultp> fmat2x3; + typedef mat<2, 4, f32, defaultp> fmat2x4; + typedef mat<3, 2, f32, defaultp> fmat3x2; + typedef mat<3, 3, f32, defaultp> fmat3x3; + typedef mat<3, 4, f32, defaultp> fmat3x4; + typedef mat<4, 2, f32, defaultp> fmat4x2; + typedef mat<4, 3, f32, defaultp> fmat4x3; + typedef mat<4, 4, f32, defaultp> fmat4x4; + + typedef mat<2, 2, f32, lowp> lowp_f32mat2x2; + typedef mat<2, 3, f32, lowp> lowp_f32mat2x3; + typedef mat<2, 4, f32, lowp> lowp_f32mat2x4; + typedef mat<3, 2, f32, lowp> lowp_f32mat3x2; + typedef mat<3, 3, f32, lowp> lowp_f32mat3x3; + typedef mat<3, 4, f32, lowp> lowp_f32mat3x4; + typedef mat<4, 2, f32, lowp> lowp_f32mat4x2; + typedef mat<4, 3, f32, lowp> lowp_f32mat4x3; + typedef mat<4, 4, f32, lowp> lowp_f32mat4x4; + + typedef mat<2, 2, f32, mediump> mediump_f32mat2x2; + typedef mat<2, 3, f32, mediump> mediump_f32mat2x3; + typedef mat<2, 4, f32, mediump> mediump_f32mat2x4; + typedef mat<3, 2, f32, mediump> mediump_f32mat3x2; + typedef mat<3, 3, f32, mediump> 
mediump_f32mat3x3; + typedef mat<3, 4, f32, mediump> mediump_f32mat3x4; + typedef mat<4, 2, f32, mediump> mediump_f32mat4x2; + typedef mat<4, 3, f32, mediump> mediump_f32mat4x3; + typedef mat<4, 4, f32, mediump> mediump_f32mat4x4; + + typedef mat<2, 2, f32, highp> highp_f32mat2x2; + typedef mat<2, 3, f32, highp> highp_f32mat2x3; + typedef mat<2, 4, f32, highp> highp_f32mat2x4; + typedef mat<3, 2, f32, highp> highp_f32mat3x2; + typedef mat<3, 3, f32, highp> highp_f32mat3x3; + typedef mat<3, 4, f32, highp> highp_f32mat3x4; + typedef mat<4, 2, f32, highp> highp_f32mat4x2; + typedef mat<4, 3, f32, highp> highp_f32mat4x3; + typedef mat<4, 4, f32, highp> highp_f32mat4x4; + + typedef mat<2, 2, f32, defaultp> f32mat2x2; + typedef mat<2, 3, f32, defaultp> f32mat2x3; + typedef mat<2, 4, f32, defaultp> f32mat2x4; + typedef mat<3, 2, f32, defaultp> f32mat3x2; + typedef mat<3, 3, f32, defaultp> f32mat3x3; + typedef mat<3, 4, f32, defaultp> f32mat3x4; + typedef mat<4, 2, f32, defaultp> f32mat4x2; + typedef mat<4, 3, f32, defaultp> f32mat4x3; + typedef mat<4, 4, f32, defaultp> f32mat4x4; + + typedef mat<2, 2, double, lowp> lowp_dmat2x2; + typedef mat<2, 3, double, lowp> lowp_dmat2x3; + typedef mat<2, 4, double, lowp> lowp_dmat2x4; + typedef mat<3, 2, double, lowp> lowp_dmat3x2; + typedef mat<3, 3, double, lowp> lowp_dmat3x3; + typedef mat<3, 4, double, lowp> lowp_dmat3x4; + typedef mat<4, 2, double, lowp> lowp_dmat4x2; + typedef mat<4, 3, double, lowp> lowp_dmat4x3; + typedef mat<4, 4, double, lowp> lowp_dmat4x4; + + typedef mat<2, 2, double, mediump> mediump_dmat2x2; + typedef mat<2, 3, double, mediump> mediump_dmat2x3; + typedef mat<2, 4, double, mediump> mediump_dmat2x4; + typedef mat<3, 2, double, mediump> mediump_dmat3x2; + typedef mat<3, 3, double, mediump> mediump_dmat3x3; + typedef mat<3, 4, double, mediump> mediump_dmat3x4; + typedef mat<4, 2, double, mediump> mediump_dmat4x2; + typedef mat<4, 3, double, mediump> mediump_dmat4x3; + typedef mat<4, 4, double, mediump> mediump_dmat4x4; + + typedef mat<2, 2, double, highp> highp_dmat2x2; + typedef mat<2, 3, double, highp> highp_dmat2x3; + typedef mat<2, 4, double, highp> highp_dmat2x4; + typedef mat<3, 2, double, highp> highp_dmat3x2; + typedef mat<3, 3, double, highp> highp_dmat3x3; + typedef mat<3, 4, double, highp> highp_dmat3x4; + typedef mat<4, 2, double, highp> highp_dmat4x2; + typedef mat<4, 3, double, highp> highp_dmat4x3; + typedef mat<4, 4, double, highp> highp_dmat4x4; + + typedef mat<2, 2, double, defaultp> dmat2x2; + typedef mat<2, 3, double, defaultp> dmat2x3; + typedef mat<2, 4, double, defaultp> dmat2x4; + typedef mat<3, 2, double, defaultp> dmat3x2; + typedef mat<3, 3, double, defaultp> dmat3x3; + typedef mat<3, 4, double, defaultp> dmat3x4; + typedef mat<4, 2, double, defaultp> dmat4x2; + typedef mat<4, 3, double, defaultp> dmat4x3; + typedef mat<4, 4, double, defaultp> dmat4x4; + + typedef mat<2, 2, f64, lowp> lowp_f64mat2x2; + typedef mat<2, 3, f64, lowp> lowp_f64mat2x3; + typedef mat<2, 4, f64, lowp> lowp_f64mat2x4; + typedef mat<3, 2, f64, lowp> lowp_f64mat3x2; + typedef mat<3, 3, f64, lowp> lowp_f64mat3x3; + typedef mat<3, 4, f64, lowp> lowp_f64mat3x4; + typedef mat<4, 2, f64, lowp> lowp_f64mat4x2; + typedef mat<4, 3, f64, lowp> lowp_f64mat4x3; + typedef mat<4, 4, f64, lowp> lowp_f64mat4x4; + + typedef mat<2, 2, f64, mediump> mediump_f64mat2x2; + typedef mat<2, 3, f64, mediump> mediump_f64mat2x3; + typedef mat<2, 4, f64, mediump> mediump_f64mat2x4; + typedef mat<3, 2, f64, mediump> mediump_f64mat3x2; + typedef mat<3, 3, f64, 
mediump> mediump_f64mat3x3; + typedef mat<3, 4, f64, mediump> mediump_f64mat3x4; + typedef mat<4, 2, f64, mediump> mediump_f64mat4x2; + typedef mat<4, 3, f64, mediump> mediump_f64mat4x3; + typedef mat<4, 4, f64, mediump> mediump_f64mat4x4; + + typedef mat<2, 2, f64, highp> highp_f64mat2x2; + typedef mat<2, 3, f64, highp> highp_f64mat2x3; + typedef mat<2, 4, f64, highp> highp_f64mat2x4; + typedef mat<3, 2, f64, highp> highp_f64mat3x2; + typedef mat<3, 3, f64, highp> highp_f64mat3x3; + typedef mat<3, 4, f64, highp> highp_f64mat3x4; + typedef mat<4, 2, f64, highp> highp_f64mat4x2; + typedef mat<4, 3, f64, highp> highp_f64mat4x3; + typedef mat<4, 4, f64, highp> highp_f64mat4x4; + + typedef mat<2, 2, f64, defaultp> f64mat2x2; + typedef mat<2, 3, f64, defaultp> f64mat2x3; + typedef mat<2, 4, f64, defaultp> f64mat2x4; + typedef mat<3, 2, f64, defaultp> f64mat3x2; + typedef mat<3, 3, f64, defaultp> f64mat3x3; + typedef mat<3, 4, f64, defaultp> f64mat3x4; + typedef mat<4, 2, f64, defaultp> f64mat4x2; + typedef mat<4, 3, f64, defaultp> f64mat4x3; + typedef mat<4, 4, f64, defaultp> f64mat4x4; + + // Signed integer matrix MxN + + typedef mat<2, 2, int, lowp> lowp_imat2x2; + typedef mat<2, 3, int, lowp> lowp_imat2x3; + typedef mat<2, 4, int, lowp> lowp_imat2x4; + typedef mat<3, 2, int, lowp> lowp_imat3x2; + typedef mat<3, 3, int, lowp> lowp_imat3x3; + typedef mat<3, 4, int, lowp> lowp_imat3x4; + typedef mat<4, 2, int, lowp> lowp_imat4x2; + typedef mat<4, 3, int, lowp> lowp_imat4x3; + typedef mat<4, 4, int, lowp> lowp_imat4x4; + + typedef mat<2, 2, int, mediump> mediump_imat2x2; + typedef mat<2, 3, int, mediump> mediump_imat2x3; + typedef mat<2, 4, int, mediump> mediump_imat2x4; + typedef mat<3, 2, int, mediump> mediump_imat3x2; + typedef mat<3, 3, int, mediump> mediump_imat3x3; + typedef mat<3, 4, int, mediump> mediump_imat3x4; + typedef mat<4, 2, int, mediump> mediump_imat4x2; + typedef mat<4, 3, int, mediump> mediump_imat4x3; + typedef mat<4, 4, int, mediump> mediump_imat4x4; + + typedef mat<2, 2, int, highp> highp_imat2x2; + typedef mat<2, 3, int, highp> highp_imat2x3; + typedef mat<2, 4, int, highp> highp_imat2x4; + typedef mat<3, 2, int, highp> highp_imat3x2; + typedef mat<3, 3, int, highp> highp_imat3x3; + typedef mat<3, 4, int, highp> highp_imat3x4; + typedef mat<4, 2, int, highp> highp_imat4x2; + typedef mat<4, 3, int, highp> highp_imat4x3; + typedef mat<4, 4, int, highp> highp_imat4x4; + + typedef mat<2, 2, int, defaultp> imat2x2; + typedef mat<2, 3, int, defaultp> imat2x3; + typedef mat<2, 4, int, defaultp> imat2x4; + typedef mat<3, 2, int, defaultp> imat3x2; + typedef mat<3, 3, int, defaultp> imat3x3; + typedef mat<3, 4, int, defaultp> imat3x4; + typedef mat<4, 2, int, defaultp> imat4x2; + typedef mat<4, 3, int, defaultp> imat4x3; + typedef mat<4, 4, int, defaultp> imat4x4; + + + typedef mat<2, 2, int8, lowp> lowp_i8mat2x2; + typedef mat<2, 3, int8, lowp> lowp_i8mat2x3; + typedef mat<2, 4, int8, lowp> lowp_i8mat2x4; + typedef mat<3, 2, int8, lowp> lowp_i8mat3x2; + typedef mat<3, 3, int8, lowp> lowp_i8mat3x3; + typedef mat<3, 4, int8, lowp> lowp_i8mat3x4; + typedef mat<4, 2, int8, lowp> lowp_i8mat4x2; + typedef mat<4, 3, int8, lowp> lowp_i8mat4x3; + typedef mat<4, 4, int8, lowp> lowp_i8mat4x4; + + typedef mat<2, 2, int8, mediump> mediump_i8mat2x2; + typedef mat<2, 3, int8, mediump> mediump_i8mat2x3; + typedef mat<2, 4, int8, mediump> mediump_i8mat2x4; + typedef mat<3, 2, int8, mediump> mediump_i8mat3x2; + typedef mat<3, 3, int8, mediump> mediump_i8mat3x3; + typedef mat<3, 4, int8, mediump> 
mediump_i8mat3x4; + typedef mat<4, 2, int8, mediump> mediump_i8mat4x2; + typedef mat<4, 3, int8, mediump> mediump_i8mat4x3; + typedef mat<4, 4, int8, mediump> mediump_i8mat4x4; + + typedef mat<2, 2, int8, highp> highp_i8mat2x2; + typedef mat<2, 3, int8, highp> highp_i8mat2x3; + typedef mat<2, 4, int8, highp> highp_i8mat2x4; + typedef mat<3, 2, int8, highp> highp_i8mat3x2; + typedef mat<3, 3, int8, highp> highp_i8mat3x3; + typedef mat<3, 4, int8, highp> highp_i8mat3x4; + typedef mat<4, 2, int8, highp> highp_i8mat4x2; + typedef mat<4, 3, int8, highp> highp_i8mat4x3; + typedef mat<4, 4, int8, highp> highp_i8mat4x4; + + typedef mat<2, 2, int8, defaultp> i8mat2x2; + typedef mat<2, 3, int8, defaultp> i8mat2x3; + typedef mat<2, 4, int8, defaultp> i8mat2x4; + typedef mat<3, 2, int8, defaultp> i8mat3x2; + typedef mat<3, 3, int8, defaultp> i8mat3x3; + typedef mat<3, 4, int8, defaultp> i8mat3x4; + typedef mat<4, 2, int8, defaultp> i8mat4x2; + typedef mat<4, 3, int8, defaultp> i8mat4x3; + typedef mat<4, 4, int8, defaultp> i8mat4x4; + + + typedef mat<2, 2, int16, lowp> lowp_i16mat2x2; + typedef mat<2, 3, int16, lowp> lowp_i16mat2x3; + typedef mat<2, 4, int16, lowp> lowp_i16mat2x4; + typedef mat<3, 2, int16, lowp> lowp_i16mat3x2; + typedef mat<3, 3, int16, lowp> lowp_i16mat3x3; + typedef mat<3, 4, int16, lowp> lowp_i16mat3x4; + typedef mat<4, 2, int16, lowp> lowp_i16mat4x2; + typedef mat<4, 3, int16, lowp> lowp_i16mat4x3; + typedef mat<4, 4, int16, lowp> lowp_i16mat4x4; + + typedef mat<2, 2, int16, mediump> mediump_i16mat2x2; + typedef mat<2, 3, int16, mediump> mediump_i16mat2x3; + typedef mat<2, 4, int16, mediump> mediump_i16mat2x4; + typedef mat<3, 2, int16, mediump> mediump_i16mat3x2; + typedef mat<3, 3, int16, mediump> mediump_i16mat3x3; + typedef mat<3, 4, int16, mediump> mediump_i16mat3x4; + typedef mat<4, 2, int16, mediump> mediump_i16mat4x2; + typedef mat<4, 3, int16, mediump> mediump_i16mat4x3; + typedef mat<4, 4, int16, mediump> mediump_i16mat4x4; + + typedef mat<2, 2, int16, highp> highp_i16mat2x2; + typedef mat<2, 3, int16, highp> highp_i16mat2x3; + typedef mat<2, 4, int16, highp> highp_i16mat2x4; + typedef mat<3, 2, int16, highp> highp_i16mat3x2; + typedef mat<3, 3, int16, highp> highp_i16mat3x3; + typedef mat<3, 4, int16, highp> highp_i16mat3x4; + typedef mat<4, 2, int16, highp> highp_i16mat4x2; + typedef mat<4, 3, int16, highp> highp_i16mat4x3; + typedef mat<4, 4, int16, highp> highp_i16mat4x4; + + typedef mat<2, 2, int16, defaultp> i16mat2x2; + typedef mat<2, 3, int16, defaultp> i16mat2x3; + typedef mat<2, 4, int16, defaultp> i16mat2x4; + typedef mat<3, 2, int16, defaultp> i16mat3x2; + typedef mat<3, 3, int16, defaultp> i16mat3x3; + typedef mat<3, 4, int16, defaultp> i16mat3x4; + typedef mat<4, 2, int16, defaultp> i16mat4x2; + typedef mat<4, 3, int16, defaultp> i16mat4x3; + typedef mat<4, 4, int16, defaultp> i16mat4x4; + + + typedef mat<2, 2, int32, lowp> lowp_i32mat2x2; + typedef mat<2, 3, int32, lowp> lowp_i32mat2x3; + typedef mat<2, 4, int32, lowp> lowp_i32mat2x4; + typedef mat<3, 2, int32, lowp> lowp_i32mat3x2; + typedef mat<3, 3, int32, lowp> lowp_i32mat3x3; + typedef mat<3, 4, int32, lowp> lowp_i32mat3x4; + typedef mat<4, 2, int32, lowp> lowp_i32mat4x2; + typedef mat<4, 3, int32, lowp> lowp_i32mat4x3; + typedef mat<4, 4, int32, lowp> lowp_i32mat4x4; + + typedef mat<2, 2, int32, mediump> mediump_i32mat2x2; + typedef mat<2, 3, int32, mediump> mediump_i32mat2x3; + typedef mat<2, 4, int32, mediump> mediump_i32mat2x4; + typedef mat<3, 2, int32, mediump> mediump_i32mat3x2; + typedef 
mat<3, 3, int32, mediump> mediump_i32mat3x3; + typedef mat<3, 4, int32, mediump> mediump_i32mat3x4; + typedef mat<4, 2, int32, mediump> mediump_i32mat4x2; + typedef mat<4, 3, int32, mediump> mediump_i32mat4x3; + typedef mat<4, 4, int32, mediump> mediump_i32mat4x4; + + typedef mat<2, 2, int32, highp> highp_i32mat2x2; + typedef mat<2, 3, int32, highp> highp_i32mat2x3; + typedef mat<2, 4, int32, highp> highp_i32mat2x4; + typedef mat<3, 2, int32, highp> highp_i32mat3x2; + typedef mat<3, 3, int32, highp> highp_i32mat3x3; + typedef mat<3, 4, int32, highp> highp_i32mat3x4; + typedef mat<4, 2, int32, highp> highp_i32mat4x2; + typedef mat<4, 3, int32, highp> highp_i32mat4x3; + typedef mat<4, 4, int32, highp> highp_i32mat4x4; + + typedef mat<2, 2, int32, defaultp> i32mat2x2; + typedef mat<2, 3, int32, defaultp> i32mat2x3; + typedef mat<2, 4, int32, defaultp> i32mat2x4; + typedef mat<3, 2, int32, defaultp> i32mat3x2; + typedef mat<3, 3, int32, defaultp> i32mat3x3; + typedef mat<3, 4, int32, defaultp> i32mat3x4; + typedef mat<4, 2, int32, defaultp> i32mat4x2; + typedef mat<4, 3, int32, defaultp> i32mat4x3; + typedef mat<4, 4, int32, defaultp> i32mat4x4; + + + typedef mat<2, 2, int64, lowp> lowp_i64mat2x2; + typedef mat<2, 3, int64, lowp> lowp_i64mat2x3; + typedef mat<2, 4, int64, lowp> lowp_i64mat2x4; + typedef mat<3, 2, int64, lowp> lowp_i64mat3x2; + typedef mat<3, 3, int64, lowp> lowp_i64mat3x3; + typedef mat<3, 4, int64, lowp> lowp_i64mat3x4; + typedef mat<4, 2, int64, lowp> lowp_i64mat4x2; + typedef mat<4, 3, int64, lowp> lowp_i64mat4x3; + typedef mat<4, 4, int64, lowp> lowp_i64mat4x4; + + typedef mat<2, 2, int64, mediump> mediump_i64mat2x2; + typedef mat<2, 3, int64, mediump> mediump_i64mat2x3; + typedef mat<2, 4, int64, mediump> mediump_i64mat2x4; + typedef mat<3, 2, int64, mediump> mediump_i64mat3x2; + typedef mat<3, 3, int64, mediump> mediump_i64mat3x3; + typedef mat<3, 4, int64, mediump> mediump_i64mat3x4; + typedef mat<4, 2, int64, mediump> mediump_i64mat4x2; + typedef mat<4, 3, int64, mediump> mediump_i64mat4x3; + typedef mat<4, 4, int64, mediump> mediump_i64mat4x4; + + typedef mat<2, 2, int64, highp> highp_i64mat2x2; + typedef mat<2, 3, int64, highp> highp_i64mat2x3; + typedef mat<2, 4, int64, highp> highp_i64mat2x4; + typedef mat<3, 2, int64, highp> highp_i64mat3x2; + typedef mat<3, 3, int64, highp> highp_i64mat3x3; + typedef mat<3, 4, int64, highp> highp_i64mat3x4; + typedef mat<4, 2, int64, highp> highp_i64mat4x2; + typedef mat<4, 3, int64, highp> highp_i64mat4x3; + typedef mat<4, 4, int64, highp> highp_i64mat4x4; + + typedef mat<2, 2, int64, defaultp> i64mat2x2; + typedef mat<2, 3, int64, defaultp> i64mat2x3; + typedef mat<2, 4, int64, defaultp> i64mat2x4; + typedef mat<3, 2, int64, defaultp> i64mat3x2; + typedef mat<3, 3, int64, defaultp> i64mat3x3; + typedef mat<3, 4, int64, defaultp> i64mat3x4; + typedef mat<4, 2, int64, defaultp> i64mat4x2; + typedef mat<4, 3, int64, defaultp> i64mat4x3; + typedef mat<4, 4, int64, defaultp> i64mat4x4; + + + // Unsigned integer matrix MxN + + typedef mat<2, 2, uint, lowp> lowp_umat2x2; + typedef mat<2, 3, uint, lowp> lowp_umat2x3; + typedef mat<2, 4, uint, lowp> lowp_umat2x4; + typedef mat<3, 2, uint, lowp> lowp_umat3x2; + typedef mat<3, 3, uint, lowp> lowp_umat3x3; + typedef mat<3, 4, uint, lowp> lowp_umat3x4; + typedef mat<4, 2, uint, lowp> lowp_umat4x2; + typedef mat<4, 3, uint, lowp> lowp_umat4x3; + typedef mat<4, 4, uint, lowp> lowp_umat4x4; + + typedef mat<2, 2, uint, mediump> mediump_umat2x2; + typedef mat<2, 3, uint, mediump> 
mediump_umat2x3; + typedef mat<2, 4, uint, mediump> mediump_umat2x4; + typedef mat<3, 2, uint, mediump> mediump_umat3x2; + typedef mat<3, 3, uint, mediump> mediump_umat3x3; + typedef mat<3, 4, uint, mediump> mediump_umat3x4; + typedef mat<4, 2, uint, mediump> mediump_umat4x2; + typedef mat<4, 3, uint, mediump> mediump_umat4x3; + typedef mat<4, 4, uint, mediump> mediump_umat4x4; + + typedef mat<2, 2, uint, highp> highp_umat2x2; + typedef mat<2, 3, uint, highp> highp_umat2x3; + typedef mat<2, 4, uint, highp> highp_umat2x4; + typedef mat<3, 2, uint, highp> highp_umat3x2; + typedef mat<3, 3, uint, highp> highp_umat3x3; + typedef mat<3, 4, uint, highp> highp_umat3x4; + typedef mat<4, 2, uint, highp> highp_umat4x2; + typedef mat<4, 3, uint, highp> highp_umat4x3; + typedef mat<4, 4, uint, highp> highp_umat4x4; + + typedef mat<2, 2, uint, defaultp> umat2x2; + typedef mat<2, 3, uint, defaultp> umat2x3; + typedef mat<2, 4, uint, defaultp> umat2x4; + typedef mat<3, 2, uint, defaultp> umat3x2; + typedef mat<3, 3, uint, defaultp> umat3x3; + typedef mat<3, 4, uint, defaultp> umat3x4; + typedef mat<4, 2, uint, defaultp> umat4x2; + typedef mat<4, 3, uint, defaultp> umat4x3; + typedef mat<4, 4, uint, defaultp> umat4x4; + + + typedef mat<2, 2, uint8, lowp> lowp_u8mat2x2; + typedef mat<2, 3, uint8, lowp> lowp_u8mat2x3; + typedef mat<2, 4, uint8, lowp> lowp_u8mat2x4; + typedef mat<3, 2, uint8, lowp> lowp_u8mat3x2; + typedef mat<3, 3, uint8, lowp> lowp_u8mat3x3; + typedef mat<3, 4, uint8, lowp> lowp_u8mat3x4; + typedef mat<4, 2, uint8, lowp> lowp_u8mat4x2; + typedef mat<4, 3, uint8, lowp> lowp_u8mat4x3; + typedef mat<4, 4, uint8, lowp> lowp_u8mat4x4; + + typedef mat<2, 2, uint8, mediump> mediump_u8mat2x2; + typedef mat<2, 3, uint8, mediump> mediump_u8mat2x3; + typedef mat<2, 4, uint8, mediump> mediump_u8mat2x4; + typedef mat<3, 2, uint8, mediump> mediump_u8mat3x2; + typedef mat<3, 3, uint8, mediump> mediump_u8mat3x3; + typedef mat<3, 4, uint8, mediump> mediump_u8mat3x4; + typedef mat<4, 2, uint8, mediump> mediump_u8mat4x2; + typedef mat<4, 3, uint8, mediump> mediump_u8mat4x3; + typedef mat<4, 4, uint8, mediump> mediump_u8mat4x4; + + typedef mat<2, 2, uint8, highp> highp_u8mat2x2; + typedef mat<2, 3, uint8, highp> highp_u8mat2x3; + typedef mat<2, 4, uint8, highp> highp_u8mat2x4; + typedef mat<3, 2, uint8, highp> highp_u8mat3x2; + typedef mat<3, 3, uint8, highp> highp_u8mat3x3; + typedef mat<3, 4, uint8, highp> highp_u8mat3x4; + typedef mat<4, 2, uint8, highp> highp_u8mat4x2; + typedef mat<4, 3, uint8, highp> highp_u8mat4x3; + typedef mat<4, 4, uint8, highp> highp_u8mat4x4; + + typedef mat<2, 2, uint8, defaultp> u8mat2x2; + typedef mat<2, 3, uint8, defaultp> u8mat2x3; + typedef mat<2, 4, uint8, defaultp> u8mat2x4; + typedef mat<3, 2, uint8, defaultp> u8mat3x2; + typedef mat<3, 3, uint8, defaultp> u8mat3x3; + typedef mat<3, 4, uint8, defaultp> u8mat3x4; + typedef mat<4, 2, uint8, defaultp> u8mat4x2; + typedef mat<4, 3, uint8, defaultp> u8mat4x3; + typedef mat<4, 4, uint8, defaultp> u8mat4x4; + + + typedef mat<2, 2, uint16, lowp> lowp_u16mat2x2; + typedef mat<2, 3, uint16, lowp> lowp_u16mat2x3; + typedef mat<2, 4, uint16, lowp> lowp_u16mat2x4; + typedef mat<3, 2, uint16, lowp> lowp_u16mat3x2; + typedef mat<3, 3, uint16, lowp> lowp_u16mat3x3; + typedef mat<3, 4, uint16, lowp> lowp_u16mat3x4; + typedef mat<4, 2, uint16, lowp> lowp_u16mat4x2; + typedef mat<4, 3, uint16, lowp> lowp_u16mat4x3; + typedef mat<4, 4, uint16, lowp> lowp_u16mat4x4; + + typedef mat<2, 2, uint16, mediump> mediump_u16mat2x2; + typedef mat<2, 3, 
uint16, mediump> mediump_u16mat2x3; + typedef mat<2, 4, uint16, mediump> mediump_u16mat2x4; + typedef mat<3, 2, uint16, mediump> mediump_u16mat3x2; + typedef mat<3, 3, uint16, mediump> mediump_u16mat3x3; + typedef mat<3, 4, uint16, mediump> mediump_u16mat3x4; + typedef mat<4, 2, uint16, mediump> mediump_u16mat4x2; + typedef mat<4, 3, uint16, mediump> mediump_u16mat4x3; + typedef mat<4, 4, uint16, mediump> mediump_u16mat4x4; + + typedef mat<2, 2, uint16, highp> highp_u16mat2x2; + typedef mat<2, 3, uint16, highp> highp_u16mat2x3; + typedef mat<2, 4, uint16, highp> highp_u16mat2x4; + typedef mat<3, 2, uint16, highp> highp_u16mat3x2; + typedef mat<3, 3, uint16, highp> highp_u16mat3x3; + typedef mat<3, 4, uint16, highp> highp_u16mat3x4; + typedef mat<4, 2, uint16, highp> highp_u16mat4x2; + typedef mat<4, 3, uint16, highp> highp_u16mat4x3; + typedef mat<4, 4, uint16, highp> highp_u16mat4x4; + + typedef mat<2, 2, uint16, defaultp> u16mat2x2; + typedef mat<2, 3, uint16, defaultp> u16mat2x3; + typedef mat<2, 4, uint16, defaultp> u16mat2x4; + typedef mat<3, 2, uint16, defaultp> u16mat3x2; + typedef mat<3, 3, uint16, defaultp> u16mat3x3; + typedef mat<3, 4, uint16, defaultp> u16mat3x4; + typedef mat<4, 2, uint16, defaultp> u16mat4x2; + typedef mat<4, 3, uint16, defaultp> u16mat4x3; + typedef mat<4, 4, uint16, defaultp> u16mat4x4; + + + typedef mat<2, 2, uint32, lowp> lowp_u32mat2x2; + typedef mat<2, 3, uint32, lowp> lowp_u32mat2x3; + typedef mat<2, 4, uint32, lowp> lowp_u32mat2x4; + typedef mat<3, 2, uint32, lowp> lowp_u32mat3x2; + typedef mat<3, 3, uint32, lowp> lowp_u32mat3x3; + typedef mat<3, 4, uint32, lowp> lowp_u32mat3x4; + typedef mat<4, 2, uint32, lowp> lowp_u32mat4x2; + typedef mat<4, 3, uint32, lowp> lowp_u32mat4x3; + typedef mat<4, 4, uint32, lowp> lowp_u32mat4x4; + + typedef mat<2, 2, uint32, mediump> mediump_u32mat2x2; + typedef mat<2, 3, uint32, mediump> mediump_u32mat2x3; + typedef mat<2, 4, uint32, mediump> mediump_u32mat2x4; + typedef mat<3, 2, uint32, mediump> mediump_u32mat3x2; + typedef mat<3, 3, uint32, mediump> mediump_u32mat3x3; + typedef mat<3, 4, uint32, mediump> mediump_u32mat3x4; + typedef mat<4, 2, uint32, mediump> mediump_u32mat4x2; + typedef mat<4, 3, uint32, mediump> mediump_u32mat4x3; + typedef mat<4, 4, uint32, mediump> mediump_u32mat4x4; + + typedef mat<2, 2, uint32, highp> highp_u32mat2x2; + typedef mat<2, 3, uint32, highp> highp_u32mat2x3; + typedef mat<2, 4, uint32, highp> highp_u32mat2x4; + typedef mat<3, 2, uint32, highp> highp_u32mat3x2; + typedef mat<3, 3, uint32, highp> highp_u32mat3x3; + typedef mat<3, 4, uint32, highp> highp_u32mat3x4; + typedef mat<4, 2, uint32, highp> highp_u32mat4x2; + typedef mat<4, 3, uint32, highp> highp_u32mat4x3; + typedef mat<4, 4, uint32, highp> highp_u32mat4x4; + + typedef mat<2, 2, uint32, defaultp> u32mat2x2; + typedef mat<2, 3, uint32, defaultp> u32mat2x3; + typedef mat<2, 4, uint32, defaultp> u32mat2x4; + typedef mat<3, 2, uint32, defaultp> u32mat3x2; + typedef mat<3, 3, uint32, defaultp> u32mat3x3; + typedef mat<3, 4, uint32, defaultp> u32mat3x4; + typedef mat<4, 2, uint32, defaultp> u32mat4x2; + typedef mat<4, 3, uint32, defaultp> u32mat4x3; + typedef mat<4, 4, uint32, defaultp> u32mat4x4; + + + typedef mat<2, 2, uint64, lowp> lowp_u64mat2x2; + typedef mat<2, 3, uint64, lowp> lowp_u64mat2x3; + typedef mat<2, 4, uint64, lowp> lowp_u64mat2x4; + typedef mat<3, 2, uint64, lowp> lowp_u64mat3x2; + typedef mat<3, 3, uint64, lowp> lowp_u64mat3x3; + typedef mat<3, 4, uint64, lowp> lowp_u64mat3x4; + typedef mat<4, 2, uint64, lowp> 
lowp_u64mat4x2;
+	typedef mat<4, 3, uint64, lowp>	lowp_u64mat4x3;
+	typedef mat<4, 4, uint64, lowp>	lowp_u64mat4x4;
+
+	typedef mat<2, 2, uint64, mediump>	mediump_u64mat2x2;
+	typedef mat<2, 3, uint64, mediump>	mediump_u64mat2x3;
+	typedef mat<2, 4, uint64, mediump>	mediump_u64mat2x4;
+	typedef mat<3, 2, uint64, mediump>	mediump_u64mat3x2;
+	typedef mat<3, 3, uint64, mediump>	mediump_u64mat3x3;
+	typedef mat<3, 4, uint64, mediump>	mediump_u64mat3x4;
+	typedef mat<4, 2, uint64, mediump>	mediump_u64mat4x2;
+	typedef mat<4, 3, uint64, mediump>	mediump_u64mat4x3;
+	typedef mat<4, 4, uint64, mediump>	mediump_u64mat4x4;
+
+	typedef mat<2, 2, uint64, highp>	highp_u64mat2x2;
+	typedef mat<2, 3, uint64, highp>	highp_u64mat2x3;
+	typedef mat<2, 4, uint64, highp>	highp_u64mat2x4;
+	typedef mat<3, 2, uint64, highp>	highp_u64mat3x2;
+	typedef mat<3, 3, uint64, highp>	highp_u64mat3x3;
+	typedef mat<3, 4, uint64, highp>	highp_u64mat3x4;
+	typedef mat<4, 2, uint64, highp>	highp_u64mat4x2;
+	typedef mat<4, 3, uint64, highp>	highp_u64mat4x3;
+	typedef mat<4, 4, uint64, highp>	highp_u64mat4x4;
+
+	typedef mat<2, 2, uint64, defaultp>	u64mat2x2;
+	typedef mat<2, 3, uint64, defaultp>	u64mat2x3;
+	typedef mat<2, 4, uint64, defaultp>	u64mat2x4;
+	typedef mat<3, 2, uint64, defaultp>	u64mat3x2;
+	typedef mat<3, 3, uint64, defaultp>	u64mat3x3;
+	typedef mat<3, 4, uint64, defaultp>	u64mat3x4;
+	typedef mat<4, 2, uint64, defaultp>	u64mat4x2;
+	typedef mat<4, 3, uint64, defaultp>	u64mat4x3;
+	typedef mat<4, 4, uint64, defaultp>	u64mat4x4;
+
+	// Quaternion
+
+	typedef qua<float, lowp>	lowp_quat;
+	typedef qua<float, mediump>	mediump_quat;
+	typedef qua<float, highp>	highp_quat;
+	typedef qua<float, defaultp>	quat;
+
+	typedef qua<float, lowp>	lowp_fquat;
+	typedef qua<float, mediump>	mediump_fquat;
+	typedef qua<float, highp>	highp_fquat;
+	typedef qua<float, defaultp>	fquat;
+
+	typedef qua<f32, lowp>	lowp_f32quat;
+	typedef qua<f32, mediump>	mediump_f32quat;
+	typedef qua<f32, highp>	highp_f32quat;
+	typedef qua<f32, defaultp>	f32quat;
+
+	typedef qua<f64, lowp>	lowp_dquat;
+	typedef qua<f64, mediump>	mediump_dquat;
+	typedef qua<f64, highp>	highp_dquat;
+	typedef qua<f64, defaultp>	dquat;
+
+	typedef qua<f64, lowp>	lowp_f64quat;
+	typedef qua<f64, mediump>	mediump_f64quat;
+	typedef qua<f64, highp>	highp_f64quat;
+	typedef qua<f64, defaultp>	f64quat;
+}//namespace glm
+
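The catalog above is mechanical: each alias pins the single vec/mat/qua template to a fixed dimension, scalar type, and precision qualifier, and the defaultp spellings (vec3, dmat2x3, quat, ...) are the ones ordinary code uses. A minimal sketch of that correspondence follows; it is not part of the patch and assumes only that the vendored GLM headers are on the include path.

    // Sketch only: the aliases above are plain instantiations of
    // vec<L, T, Q>, mat<C, R, T, Q> and qua<T, Q>.
    #include <glm/glm.hpp>
    #include <type_traits>

    static_assert(std::is_same_v<glm::vec3,    glm::vec<3, float, glm::defaultp>>);
    static_assert(std::is_same_v<glm::dmat2x3, glm::mat<2, 3, double, glm::defaultp>>);
    static_assert(std::is_same_v<glm::quat,    glm::qua<float, glm::defaultp>>);

    int main() {
        glm::highp_vec3 a(1.0f, 2.0f, 3.0f); // explicitly qualified alias
        glm::vec3 b(a);                      // defaultp alias of the same template
        return glm::all(glm::equal(b + b, 2.0f * b)) ? 0 : 1; // exact for floats
    }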
diff --git a/thirdparty/manifold/thirdparty/glm/glm/geometric.hpp b/thirdparty/manifold/thirdparty/glm/glm/geometric.hpp
new file mode 100644
index 000000000000..ac857e69d4cf
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/geometric.hpp
@@ -0,0 +1,116 @@
+/// @ref core
+/// @file glm/geometric.hpp
+///
+/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions
+///
+/// @defgroup core_func_geometric Geometric functions
+/// @ingroup core
+///
+/// These operate on vectors as vectors, not component-wise.
+///
+/// Include <glm/geometric.hpp> to use these core features.

+#pragma once
+
+#include "detail/type_vec3.hpp"
+
+namespace glm
+{
+	/// @addtogroup core_func_geometric
+	/// @{
+
+	/// Returns the length of x, i.e., sqrt(x * x).
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Floating-point scalar types.
+	///
+	/// @see GLSL length man page
+	/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL T length(vec<L, T, Q> const& x);
+
+	/// Returns the distance between p0 and p1, i.e., length(p0 - p1).
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Floating-point scalar types.
+	///
+	/// @see GLSL distance man page
+	/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL T distance(vec<L, T, Q> const& p0, vec<L, T, Q> const& p1);
+
+	/// Returns the dot product of x and y, i.e., result = x * y.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Floating-point scalar types.
+	///
+	/// @see GLSL dot man page
+	/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR T dot(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+	/// Returns the cross product of x and y.
+	///
+	/// @tparam T Floating-point scalar types.
+	///
+	/// @see GLSL cross man page
+	/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y);
+
+	/// Returns a vector in the same direction as x but with length of 1.
+	/// According to issue 10 GLSL 1.10 specification, if length(x) == 0 then result is undefined and generate an error.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Floating-point scalar types.
+	///
+	/// @see GLSL normalize man page
+	/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> normalize(vec<L, T, Q> const& x);
+
+	/// If dot(Nref, I) < 0.0, return N, otherwise, return -N.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Floating-point scalar types.
+	///
+	/// @see GLSL faceforward man page
+	/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> faceforward(
+		vec<L, T, Q> const& N,
+		vec<L, T, Q> const& I,
+		vec<L, T, Q> const& Nref);
+
+	/// For the incident vector I and surface orientation N,
+	/// returns the reflection direction : result = I - 2.0 * dot(N, I) * N.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Floating-point scalar types.
+	///
+	/// @see GLSL reflect man page
+	/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> reflect(
+		vec<L, T, Q> const& I,
+		vec<L, T, Q> const& N);
+
+	/// For the incident vector I and surface normal N,
+	/// and the ratio of indices of refraction eta,
+	/// return the refraction vector.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Floating-point scalar types.
+	///
+	/// @see GLSL refract man page
+	/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> refract(
+		vec<L, T, Q> const& I,
+		vec<L, T, Q> const& N,
+		T eta);
+
+	/// @}
+}//namespace glm
+
+#include "detail/func_geometric.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/glm.cppm b/thirdparty/manifold/thirdparty/glm/glm/glm.cppm
new file mode 100644
index 000000000000..85e946e0f83c
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/glm.cppm
@@ -0,0 +1,2675 @@
+module;
+
+// #define GLM_GTC_INLINE_NAMESPACE to inline glm::gtc into glm
+// #define GLM_EXT_INLINE_NAMESPACE to inline glm::ext into glm
+// #define GLM_GTX_INLINE_NAMESPACE to inline glm::gtx into glm
+
+#include <glm/glm.hpp>
+#include <glm/ext.hpp>
+
+export module glm;
+
+export namespace glm { + // Base types + using glm::qualifier; + using glm::precision; + using glm::vec; + using glm::mat; + using glm::qua; +# if GLM_HAS_TEMPLATE_ALIASES + using glm::tvec1; + using glm::tvec2; + using glm::tvec3; + using glm::tvec4; + using glm::tmat2x2; + using glm::tmat2x3; + using glm::tmat2x4; + using glm::tmat3x2; + using glm::tmat3x3; + using glm::tmat3x4; + using glm::tmat4x2; + using glm::tmat4x3; + using glm::tmat4x4; + using glm::tquat; +# endif + + using glm::int8; + using glm::int16; + using glm::int32; + using glm::int64; + using glm::uint8; + using glm::uint16; + using glm::uint32; + using glm::uint64; + using glm::lowp_i8; + using glm::mediump_i8; + using glm::highp_i8; + using glm::i8; + using glm::lowp_int8; + using glm::mediump_int8; + using glm::highp_int8; + using glm::lowp_int8_t; + using glm::mediump_int8_t; + using glm::highp_int8_t; + using glm::int8_t; + using glm::lowp_i16; + using glm::mediump_i16; + using glm::highp_i16; + using glm::i16; + using glm::lowp_int16; + using glm::mediump_int16; + using glm::highp_int16; + using glm::lowp_int16_t; + using glm::mediump_int16_t; + using glm::highp_int16_t; + using glm::int16_t; + using glm::lowp_i32; + using glm::mediump_i32; + using glm::highp_i32; + using glm::i32; + using glm::lowp_int32; + using glm::mediump_int32; + using glm::highp_int32; + using glm::lowp_int32_t; + using glm::mediump_int32_t; + using glm::highp_int32_t; + using glm::int32_t; + using glm::lowp_i64; + using glm::mediump_i64; + using glm::highp_i64; + using glm::i64; + using glm::lowp_int64; + using glm::mediump_int64; + using glm::highp_int64; + using glm::lowp_int64_t; + using glm::mediump_int64_t; + using glm::highp_int64_t; + using glm::int64_t; + using glm::uint; + using glm::lowp_u8; + using glm::mediump_u8; + using glm::highp_u8; + using glm::u8; + using glm::lowp_uint8; + using glm::mediump_uint8; + using glm::highp_uint8; + using glm::lowp_uint8_t; + using glm::mediump_uint8_t; + using glm::highp_uint8_t; + using glm::uint8_t; + using glm::lowp_u16; + using glm::mediump_u16; + using glm::highp_u16; + using glm::u16; + using glm::lowp_uint16; + using glm::mediump_uint16; + using glm::highp_uint16; + using glm::lowp_uint16_t; + using glm::mediump_uint16_t; + using glm::highp_uint16_t; + using glm::uint16_t; + using glm::lowp_u32; + using glm::mediump_u32; + using glm::highp_u32; + using glm::u32; + using glm::lowp_uint32; + using glm::mediump_uint32; + using glm::highp_uint32; + using glm::lowp_uint32_t; + using glm::mediump_uint32_t; + using glm::highp_uint32_t; + using glm::uint32_t; + using glm::lowp_u64; + using glm::mediump_u64; + using glm::highp_u64; + using glm::u64; + using glm::lowp_uint64; + using glm::mediump_uint64; + using glm::highp_uint64; + using
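geometric.hpp mirrors GLSL's geometric built-ins one-for-one, which is what lets the manifold code layered on top of it write shader-style vector math on the CPU. A short sketch, again not part of the patch, exercising the declarations above with exactly representable values:

    // Sketch only: the geometric functions declared above.
    #include <glm/geometric.hpp>
    #include <glm/vec3.hpp>
    #include <cassert>

    int main() {
        const glm::vec3 n = glm::normalize(glm::vec3(0.0f, 0.0f, 2.0f)); // unit +Z
        const glm::vec3 i(1.0f, 0.0f, -1.0f);                            // incident direction

        assert(glm::length(n) == 1.0f);                           // sqrt(dot(n, n))
        assert(glm::dot(n, glm::vec3(1.0f, 0.0f, 0.0f)) == 0.0f); // orthogonal axes
        assert(glm::cross(glm::vec3(1.0f, 0.0f, 0.0f),
                          glm::vec3(0.0f, 1.0f, 0.0f)) == glm::vec3(0.0f, 0.0f, 1.0f));
        assert(glm::reflect(i, n) == glm::vec3(1.0f, 0.0f, 1.0f)); // I - 2*dot(N,I)*N
        return 0;
    }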
glm::lowp_uint64_t; + using glm::mediump_uint64_t; + using glm::highp_uint64_t; + using glm::uint64_t; + using glm::lowp_f32; + using glm::mediump_f32; + using glm::highp_f32; + using glm::f32; + using glm::lowp_float32; + using glm::mediump_float32; + using glm::highp_float32; + using glm::float32; + using glm::lowp_float32_t; + using glm::mediump_float32_t; + using glm::highp_float32_t; + using glm::float32_t; + using glm::lowp_f64; + using glm::mediump_f64; + using glm::highp_f64; + using glm::f64; + using glm::lowp_float64; + using glm::mediump_float64; + using glm::highp_float64; + using glm::float64; + using glm::lowp_float64_t; + using glm::mediump_float64_t; + using glm::highp_float64_t; + using glm::float64_t; + using glm::lowp_bvec1; + using glm::lowp_bvec2; + using glm::lowp_bvec3; + using glm::lowp_bvec4; + using glm::mediump_bvec1; + using glm::mediump_bvec2; + using glm::mediump_bvec3; + using glm::mediump_bvec4; + using glm::highp_bvec1; + using glm::highp_bvec2; + using glm::highp_bvec3; + using glm::highp_bvec4; + using glm::bvec1; + using glm::bvec2; + using glm::bvec3; + using glm::bvec4; + using glm::lowp_ivec1; + using glm::lowp_ivec2; + using glm::lowp_ivec3; + using glm::lowp_ivec4; + using glm::mediump_ivec1; + using glm::mediump_ivec2; + using glm::mediump_ivec3; + using glm::mediump_ivec4; + using glm::highp_ivec1; + using glm::highp_ivec2; + using glm::highp_ivec3; + using glm::highp_ivec4; + using glm::ivec1; + using glm::ivec2; + using glm::ivec3; + using glm::ivec4; + using glm::lowp_i8vec1; + using glm::lowp_i8vec2; + using glm::lowp_i8vec3; + using glm::lowp_i8vec4; + using glm::mediump_i8vec1; + using glm::mediump_i8vec2; + using glm::mediump_i8vec3; + using glm::mediump_i8vec4; + using glm::highp_i8vec1; + using glm::highp_i8vec2; + using glm::highp_i8vec3; + using glm::highp_i8vec4; + using glm::i8vec1; + using glm::i8vec2; + using glm::i8vec3; + using glm::i8vec4; + using glm::lowp_i16vec1; + using glm::lowp_i16vec2; + using glm::lowp_i16vec3; + using glm::lowp_i16vec4; + using glm::mediump_i16vec1; + using glm::mediump_i16vec2; + using glm::mediump_i16vec3; + using glm::mediump_i16vec4; + using glm::highp_i16vec1; + using glm::highp_i16vec2; + using glm::highp_i16vec3; + using glm::highp_i16vec4; + using glm::i16vec1; + using glm::i16vec2; + using glm::i16vec3; + using glm::i16vec4; + using glm::lowp_i32vec1; + using glm::lowp_i32vec2; + using glm::lowp_i32vec3; + using glm::lowp_i32vec4; + using glm::mediump_i32vec1; + using glm::mediump_i32vec2; + using glm::mediump_i32vec3; + using glm::mediump_i32vec4; + using glm::highp_i32vec1; + using glm::highp_i32vec2; + using glm::highp_i32vec3; + using glm::highp_i32vec4; + using glm::i32vec1; + using glm::i32vec2; + using glm::i32vec3; + using glm::i32vec4; + using glm::lowp_i64vec1; + using glm::lowp_i64vec2; + using glm::lowp_i64vec3; + using glm::lowp_i64vec4; + using glm::mediump_i64vec1; + using glm::mediump_i64vec2; + using glm::mediump_i64vec3; + using glm::mediump_i64vec4; + using glm::highp_i64vec1; + using glm::highp_i64vec2; + using glm::highp_i64vec3; + using glm::highp_i64vec4; + using glm::i64vec1; + using glm::i64vec2; + using glm::i64vec3; + using glm::i64vec4; + using glm::lowp_uvec1; + using glm::lowp_uvec2; + using glm::lowp_uvec3; + using glm::lowp_uvec4; + using glm::mediump_uvec1; + using glm::mediump_uvec2; + using glm::mediump_uvec3; + using glm::mediump_uvec4; + using glm::highp_uvec1; + using glm::highp_uvec2; + using glm::highp_uvec3; + using glm::highp_uvec4; + using glm::uvec1; + 
using glm::uvec2; + using glm::uvec3; + using glm::uvec4; + using glm::lowp_u8vec1; + using glm::lowp_u8vec2; + using glm::lowp_u8vec3; + using glm::lowp_u8vec4; + using glm::mediump_u8vec1; + using glm::mediump_u8vec2; + using glm::mediump_u8vec3; + using glm::mediump_u8vec4; + using glm::highp_u8vec1; + using glm::highp_u8vec2; + using glm::highp_u8vec3; + using glm::highp_u8vec4; + using glm::u8vec1; + using glm::u8vec2; + using glm::u8vec3; + using glm::u8vec4; + using glm::lowp_u16vec1; + using glm::lowp_u16vec2; + using glm::lowp_u16vec3; + using glm::lowp_u16vec4; + using glm::mediump_u16vec1; + using glm::mediump_u16vec2; + using glm::mediump_u16vec3; + using glm::mediump_u16vec4; + using glm::highp_u16vec1; + using glm::highp_u16vec2; + using glm::highp_u16vec3; + using glm::highp_u16vec4; + using glm::u16vec1; + using glm::u16vec2; + using glm::u16vec3; + using glm::u16vec4; + using glm::lowp_u32vec1; + using glm::lowp_u32vec2; + using glm::lowp_u32vec3; + using glm::lowp_u32vec4; + using glm::mediump_u32vec1; + using glm::mediump_u32vec2; + using glm::mediump_u32vec3; + using glm::mediump_u32vec4; + using glm::highp_u32vec1; + using glm::highp_u32vec2; + using glm::highp_u32vec3; + using glm::highp_u32vec4; + using glm::u32vec1; + using glm::u32vec2; + using glm::u32vec3; + using glm::u32vec4; + using glm::lowp_u64vec1; + using glm::lowp_u64vec2; + using glm::lowp_u64vec3; + using glm::lowp_u64vec4; + using glm::mediump_u64vec1; + using glm::mediump_u64vec2; + using glm::mediump_u64vec3; + using glm::mediump_u64vec4; + using glm::highp_u64vec1; + using glm::highp_u64vec2; + using glm::highp_u64vec3; + using glm::highp_u64vec4; + using glm::u64vec1; + using glm::u64vec2; + using glm::u64vec3; + using glm::u64vec4; + using glm::lowp_vec1; + using glm::lowp_vec2; + using glm::lowp_vec3; + using glm::lowp_vec4; + using glm::mediump_vec1; + using glm::mediump_vec2; + using glm::mediump_vec3; + using glm::mediump_vec4; + using glm::highp_vec1; + using glm::highp_vec2; + using glm::highp_vec3; + using glm::highp_vec4; + using glm::vec1; + using glm::vec2; + using glm::vec3; + using glm::vec4; + using glm::lowp_fvec1; + using glm::lowp_fvec2; + using glm::lowp_fvec3; + using glm::lowp_fvec4; + using glm::mediump_fvec1; + using glm::mediump_fvec2; + using glm::mediump_fvec3; + using glm::mediump_fvec4; + using glm::highp_fvec1; + using glm::highp_fvec2; + using glm::highp_fvec3; + using glm::highp_fvec4; + using glm::fvec1; + using glm::fvec2; + using glm::fvec3; + using glm::fvec4; + using glm::lowp_f32vec1; + using glm::lowp_f32vec2; + using glm::lowp_f32vec3; + using glm::lowp_f32vec4; + using glm::mediump_f32vec1; + using glm::mediump_f32vec2; + using glm::mediump_f32vec3; + using glm::mediump_f32vec4; + using glm::highp_f32vec1; + using glm::highp_f32vec2; + using glm::highp_f32vec3; + using glm::highp_f32vec4; + using glm::f32vec1; + using glm::f32vec2; + using glm::f32vec3; + using glm::f32vec4; + using glm::lowp_dvec1; + using glm::lowp_dvec2; + using glm::lowp_dvec3; + using glm::lowp_dvec4; + using glm::mediump_dvec1; + using glm::mediump_dvec2; + using glm::mediump_dvec3; + using glm::mediump_dvec4; + using glm::highp_dvec1; + using glm::highp_dvec2; + using glm::highp_dvec3; + using glm::highp_dvec4; + using glm::dvec1; + using glm::dvec2; + using glm::dvec3; + using glm::dvec4; + using glm::lowp_f64vec1; + using glm::lowp_f64vec2; + using glm::lowp_f64vec3; + using glm::lowp_f64vec4; + using glm::mediump_f64vec1; + using glm::mediump_f64vec2; + using glm::mediump_f64vec3; + 
using glm::mediump_f64vec4; + using glm::highp_f64vec1; + using glm::highp_f64vec2; + using glm::highp_f64vec3; + using glm::highp_f64vec4; + using glm::f64vec1; + using glm::f64vec2; + using glm::f64vec3; + using glm::f64vec4; + using glm::lowp_mat2; + using glm::lowp_mat3; + using glm::lowp_mat4; + using glm::mediump_mat2; + using glm::mediump_mat3; + using glm::mediump_mat4; + using glm::highp_mat2; + using glm::highp_mat3; + using glm::highp_mat4; + using glm::mat2; + using glm::mat3; + using glm::mat4; + using glm::lowp_fmat2; + using glm::lowp_fmat3; + using glm::lowp_fmat4; + using glm::mediump_fmat2; + using glm::mediump_fmat3; + using glm::mediump_fmat4; + using glm::highp_fmat2; + using glm::highp_fmat3; + using glm::highp_fmat4; + using glm::fmat2; + using glm::fmat3; + using glm::fmat4; + using glm::lowp_f32mat2; + using glm::lowp_f32mat3; + using glm::lowp_f32mat4; + using glm::mediump_f32mat2; + using glm::mediump_f32mat3; + using glm::mediump_f32mat4; + using glm::highp_f32mat2; + using glm::highp_f32mat3; + using glm::highp_f32mat4; + using glm::f32mat2; + using glm::f32mat3; + using glm::f32mat4; + using glm::lowp_dmat2; + using glm::lowp_dmat3; + using glm::lowp_dmat4; + using glm::mediump_dmat2; + using glm::mediump_dmat3; + using glm::mediump_dmat4; + using glm::highp_dmat2; + using glm::highp_dmat3; + using glm::highp_dmat4; + using glm::dmat2; + using glm::dmat3; + using glm::dmat4; + using glm::lowp_f64mat2; + using glm::lowp_f64mat3; + using glm::lowp_f64mat4; + using glm::mediump_f64mat2; + using glm::mediump_f64mat3; + using glm::mediump_f64mat4; + using glm::highp_f64mat2; + using glm::highp_f64mat3; + using glm::highp_f64mat4; + using glm::f64mat2; + using glm::f64mat3; + using glm::f64mat4; + using glm::lowp_mat2x2; + using glm::lowp_mat2x3; + using glm::lowp_mat2x4; + using glm::lowp_mat3x2; + using glm::lowp_mat3x3; + using glm::lowp_mat3x4; + using glm::lowp_mat4x2; + using glm::lowp_mat4x3; + using glm::lowp_mat4x4; + using glm::mediump_mat2x2; + using glm::mediump_mat2x3; + using glm::mediump_mat2x4; + using glm::mediump_mat3x2; + using glm::mediump_mat3x3; + using glm::mediump_mat3x4; + using glm::mediump_mat4x2; + using glm::mediump_mat4x3; + using glm::mediump_mat4x4; + using glm::highp_mat2x2; + using glm::highp_mat2x3; + using glm::highp_mat2x4; + using glm::highp_mat3x2; + using glm::highp_mat3x3; + using glm::highp_mat3x4; + using glm::highp_mat4x2; + using glm::highp_mat4x3; + using glm::highp_mat4x4; + using glm::mat2x2; + using glm::mat2x3; + using glm::mat2x4; + using glm::mat3x2; + using glm::mat3x3; + using glm::mat3x4; + using glm::mat4x2; + using glm::mat4x3; + using glm::mat4x4; + using glm::lowp_fmat2x2; + using glm::lowp_fmat2x3; + using glm::lowp_fmat2x4; + using glm::lowp_fmat3x2; + using glm::lowp_fmat3x3; + using glm::lowp_fmat3x4; + using glm::lowp_fmat4x2; + using glm::lowp_fmat4x3; + using glm::lowp_fmat4x4; + using glm::mediump_fmat2x2; + using glm::mediump_fmat2x3; + using glm::mediump_fmat2x4; + using glm::mediump_fmat3x2; + using glm::mediump_fmat3x3; + using glm::mediump_fmat3x4; + using glm::mediump_fmat4x2; + using glm::mediump_fmat4x3; + using glm::mediump_fmat4x4; + using glm::highp_fmat2x2; + using glm::highp_fmat2x3; + using glm::highp_fmat2x4; + using glm::highp_fmat3x2; + using glm::highp_fmat3x3; + using glm::highp_fmat3x4; + using glm::highp_fmat4x2; + using glm::highp_fmat4x3; + using glm::highp_fmat4x4; + using glm::fmat2x2; + using glm::fmat2x3; + using glm::fmat2x4; + using glm::fmat3x2; + using glm::fmat3x3; + 
using glm::fmat3x4; + using glm::fmat4x2; + using glm::fmat4x3; + using glm::fmat4x4; + using glm::lowp_f32mat2x2; + using glm::lowp_f32mat2x3; + using glm::lowp_f32mat2x4; + using glm::lowp_f32mat3x2; + using glm::lowp_f32mat3x3; + using glm::lowp_f32mat3x4; + using glm::lowp_f32mat4x2; + using glm::lowp_f32mat4x3; + using glm::lowp_f32mat4x4; + + using glm::mediump_f32mat2x2; + using glm::mediump_f32mat2x3; + using glm::mediump_f32mat2x4; + using glm::mediump_f32mat3x2; + using glm::mediump_f32mat3x3; + using glm::mediump_f32mat3x4; + using glm::mediump_f32mat4x2; + using glm::mediump_f32mat4x3; + using glm::mediump_f32mat4x4; + using glm::highp_f32mat2x2; + using glm::highp_f32mat2x3; + using glm::highp_f32mat2x4; + using glm::highp_f32mat3x2; + using glm::highp_f32mat3x3; + using glm::highp_f32mat3x4; + using glm::highp_f32mat4x2; + using glm::highp_f32mat4x3; + using glm::highp_f32mat4x4; + using glm::f32mat2x2; + using glm::f32mat2x3; + using glm::f32mat2x4; + using glm::f32mat3x2; + using glm::f32mat3x3; + using glm::f32mat3x4; + using glm::f32mat4x2; + using glm::f32mat4x3; + using glm::f32mat4x4; + using glm::lowp_dmat2x2; + using glm::lowp_dmat2x3; + using glm::lowp_dmat2x4; + using glm::lowp_dmat3x2; + using glm::lowp_dmat3x3; + using glm::lowp_dmat3x4; + using glm::lowp_dmat4x2; + using glm::lowp_dmat4x3; + using glm::lowp_dmat4x4; + using glm::mediump_dmat2x2; + using glm::mediump_dmat2x3; + using glm::mediump_dmat2x4; + using glm::mediump_dmat3x2; + using glm::mediump_dmat3x3; + using glm::mediump_dmat3x4; + using glm::mediump_dmat4x2; + using glm::mediump_dmat4x3; + using glm::mediump_dmat4x4; + using glm::highp_dmat2x2; + using glm::highp_dmat2x3; + using glm::highp_dmat2x4; + using glm::highp_dmat3x2; + using glm::highp_dmat3x3; + using glm::highp_dmat3x4; + using glm::highp_dmat4x2; + using glm::highp_dmat4x3; + using glm::highp_dmat4x4; + using glm::dmat2x2; + using glm::dmat2x3; + using glm::dmat2x4; + using glm::dmat3x2; + using glm::dmat3x3; + using glm::dmat3x4; + using glm::dmat4x2; + using glm::dmat4x3; + using glm::dmat4x4; + using glm::lowp_f64mat2x2; + using glm::lowp_f64mat2x3; + using glm::lowp_f64mat2x4; + using glm::lowp_f64mat3x2; + using glm::lowp_f64mat3x3; + using glm::lowp_f64mat3x4; + using glm::lowp_f64mat4x2; + using glm::lowp_f64mat4x3; + using glm::lowp_f64mat4x4; + using glm::mediump_f64mat2x2; + using glm::mediump_f64mat2x3; + using glm::mediump_f64mat2x4; + using glm::mediump_f64mat3x2; + using glm::mediump_f64mat3x3; + using glm::mediump_f64mat3x4; + using glm::mediump_f64mat4x2; + using glm::mediump_f64mat4x3; + using glm::mediump_f64mat4x4; + using glm::highp_f64mat2x2; + using glm::highp_f64mat2x3; + using glm::highp_f64mat2x4; + using glm::highp_f64mat3x2; + using glm::highp_f64mat3x3; + using glm::highp_f64mat3x4; + using glm::highp_f64mat4x2; + using glm::highp_f64mat4x3; + using glm::highp_f64mat4x4; + using glm::f64mat2x2; + using glm::f64mat2x3; + using glm::f64mat2x4; + using glm::f64mat3x2; + using glm::f64mat3x3; + using glm::f64mat3x4; + using glm::f64mat4x2; + using glm::f64mat4x3; + using glm::f64mat4x4; + using glm::lowp_imat2x2; + using glm::lowp_imat2x3; + using glm::lowp_imat2x4; + using glm::lowp_imat3x2; + using glm::lowp_imat3x3; + using glm::lowp_imat3x4; + using glm::lowp_imat4x2; + using glm::lowp_imat4x3; + using glm::lowp_imat4x4; + using glm::mediump_imat2x2; + using glm::mediump_imat2x3; + using glm::mediump_imat2x4; + using glm::mediump_imat3x2; + using glm::mediump_imat3x3; + using glm::mediump_imat3x4; + using 
glm::mediump_imat4x2; + using glm::mediump_imat4x3; + using glm::mediump_imat4x4; + using glm::highp_imat2x2; + using glm::highp_imat2x3; + using glm::highp_imat2x4; + using glm::highp_imat3x2; + using glm::highp_imat3x3; + using glm::highp_imat3x4; + using glm::highp_imat4x2; + using glm::highp_imat4x3; + using glm::highp_imat4x4; + using glm::imat2x2; + using glm::imat2x3; + using glm::imat2x4; + using glm::imat3x2; + using glm::imat3x3; + using glm::imat3x4; + using glm::imat4x2; + using glm::imat4x3; + using glm::imat4x4; + using glm::lowp_i8mat2x2; + using glm::lowp_i8mat2x3; + using glm::lowp_i8mat2x4; + using glm::lowp_i8mat3x2; + using glm::lowp_i8mat3x3; + using glm::lowp_i8mat3x4; + using glm::lowp_i8mat4x2; + using glm::lowp_i8mat4x3; + using glm::lowp_i8mat4x4; + using glm::mediump_i8mat2x2; + using glm::mediump_i8mat2x3; + using glm::mediump_i8mat2x4; + using glm::mediump_i8mat3x2; + using glm::mediump_i8mat3x3; + using glm::mediump_i8mat3x4; + using glm::mediump_i8mat4x2; + using glm::mediump_i8mat4x3; + using glm::mediump_i8mat4x4; + using glm::highp_i8mat2x2; + using glm::highp_i8mat2x3; + using glm::highp_i8mat2x4; + using glm::highp_i8mat3x2; + using glm::highp_i8mat3x3; + using glm::highp_i8mat3x4; + using glm::highp_i8mat4x2; + using glm::highp_i8mat4x3; + using glm::highp_i8mat4x4; + using glm::i8mat2x2; + using glm::i8mat2x3; + using glm::i8mat2x4; + using glm::i8mat3x2; + using glm::i8mat3x3; + using glm::i8mat3x4; + using glm::i8mat4x2; + using glm::i8mat4x3; + using glm::i8mat4x4; + using glm::lowp_i16mat2x2; + using glm::lowp_i16mat2x3; + using glm::lowp_i16mat2x4; + using glm::lowp_i16mat3x2; + using glm::lowp_i16mat3x3; + using glm::lowp_i16mat3x4; + using glm::lowp_i16mat4x2; + using glm::lowp_i16mat4x3; + using glm::lowp_i16mat4x4; + using glm::mediump_i16mat2x2; + using glm::mediump_i16mat2x3; + using glm::mediump_i16mat2x4; + using glm::mediump_i16mat3x2; + using glm::mediump_i16mat3x3; + using glm::mediump_i16mat3x4; + using glm::mediump_i16mat4x2; + using glm::mediump_i16mat4x3; + using glm::mediump_i16mat4x4; + using glm::highp_i16mat2x2; + using glm::highp_i16mat2x3; + using glm::highp_i16mat2x4; + using glm::highp_i16mat3x2; + using glm::highp_i16mat3x3; + using glm::highp_i16mat3x4; + using glm::highp_i16mat4x2; + using glm::highp_i16mat4x3; + using glm::highp_i16mat4x4; + using glm::i16mat2x2; + using glm::i16mat2x3; + using glm::i16mat2x4; + using glm::i16mat3x2; + using glm::i16mat3x3; + using glm::i16mat3x4; + using glm::i16mat4x2; + using glm::i16mat4x3; + using glm::i16mat4x4; + using glm::lowp_i32mat2x2; + using glm::lowp_i32mat2x3; + using glm::lowp_i32mat2x4; + using glm::lowp_i32mat3x2; + using glm::lowp_i32mat3x3; + using glm::lowp_i32mat3x4; + using glm::lowp_i32mat4x2; + using glm::lowp_i32mat4x3; + using glm::lowp_i32mat4x4; + using glm::mediump_i32mat2x2; + using glm::mediump_i32mat2x3; + using glm::mediump_i32mat2x4; + using glm::mediump_i32mat3x2; + using glm::mediump_i32mat3x3; + using glm::mediump_i32mat3x4; + using glm::mediump_i32mat4x2; + using glm::mediump_i32mat4x3; + using glm::mediump_i32mat4x4; + using glm::highp_i32mat2x2; + using glm::highp_i32mat2x3; + using glm::highp_i32mat2x4; + using glm::highp_i32mat3x2; + using glm::highp_i32mat3x3; + using glm::highp_i32mat3x4; + using glm::highp_i32mat4x2; + using glm::highp_i32mat4x3; + using glm::highp_i32mat4x4; + using glm::i32mat2x2; + using glm::i32mat2x3; + using glm::i32mat2x4; + using glm::i32mat3x2; + using glm::i32mat3x3; + using glm::i32mat3x4; + using glm::i32mat4x2; + 
using glm::i32mat4x3; + using glm::i32mat4x4; + using glm::lowp_i64mat2x2; + using glm::lowp_i64mat2x3; + using glm::lowp_i64mat2x4; + using glm::lowp_i64mat3x2; + using glm::lowp_i64mat3x3; + using glm::lowp_i64mat3x4; + using glm::lowp_i64mat4x2; + using glm::lowp_i64mat4x3; + using glm::lowp_i64mat4x4; + using glm::mediump_i64mat2x2; + using glm::mediump_i64mat2x3; + using glm::mediump_i64mat2x4; + using glm::mediump_i64mat3x2; + using glm::mediump_i64mat3x3; + using glm::mediump_i64mat3x4; + using glm::mediump_i64mat4x2; + using glm::mediump_i64mat4x3; + using glm::mediump_i64mat4x4; + using glm::highp_i64mat2x2; + using glm::highp_i64mat2x3; + using glm::highp_i64mat2x4; + using glm::highp_i64mat3x2; + using glm::highp_i64mat3x3; + using glm::highp_i64mat3x4; + using glm::highp_i64mat4x2; + using glm::highp_i64mat4x3; + using glm::highp_i64mat4x4; + using glm::i64mat2x2; + using glm::i64mat2x3; + using glm::i64mat2x4; + using glm::i64mat3x2; + using glm::i64mat3x3; + using glm::i64mat3x4; + using glm::i64mat4x2; + using glm::i64mat4x3; + using glm::i64mat4x4; + using glm::lowp_umat2x2; + using glm::lowp_umat2x3; + using glm::lowp_umat2x4; + using glm::lowp_umat3x2; + using glm::lowp_umat3x3; + using glm::lowp_umat3x4; + using glm::lowp_umat4x2; + using glm::lowp_umat4x3; + using glm::lowp_umat4x4; + using glm::mediump_umat2x2; + using glm::mediump_umat2x3; + using glm::mediump_umat2x4; + using glm::mediump_umat3x2; + using glm::mediump_umat3x3; + using glm::mediump_umat3x4; + using glm::mediump_umat4x2; + using glm::mediump_umat4x3; + using glm::mediump_umat4x4; + using glm::highp_umat2x2; + using glm::highp_umat2x3; + using glm::highp_umat2x4; + using glm::highp_umat3x2; + using glm::highp_umat3x3; + using glm::highp_umat3x4; + using glm::highp_umat4x2; + using glm::highp_umat4x3; + using glm::highp_umat4x4; + using glm::umat2x2; + using glm::umat2x3; + using glm::umat2x4; + using glm::umat3x2; + using glm::umat3x3; + using glm::umat3x4; + using glm::umat4x2; + using glm::umat4x3; + using glm::umat4x4; + using glm::lowp_u8mat2x2; + using glm::lowp_u8mat2x3; + using glm::lowp_u8mat2x4; + using glm::lowp_u8mat3x2; + using glm::lowp_u8mat3x3; + using glm::lowp_u8mat3x4; + using glm::lowp_u8mat4x2; + using glm::lowp_u8mat4x3; + using glm::lowp_u8mat4x4; + using glm::mediump_u8mat2x2; + using glm::mediump_u8mat2x3; + using glm::mediump_u8mat2x4; + using glm::mediump_u8mat3x2; + using glm::mediump_u8mat3x3; + using glm::mediump_u8mat3x4; + using glm::mediump_u8mat4x2; + using glm::mediump_u8mat4x3; + using glm::mediump_u8mat4x4; + using glm::highp_u8mat2x2; + using glm::highp_u8mat2x3; + using glm::highp_u8mat2x4; + using glm::highp_u8mat3x2; + using glm::highp_u8mat3x3; + using glm::highp_u8mat3x4; + using glm::highp_u8mat4x2; + using glm::highp_u8mat4x3; + using glm::highp_u8mat4x4; + using glm::u8mat2x2; + using glm::u8mat2x3; + using glm::u8mat2x4; + using glm::u8mat3x2; + using glm::u8mat3x3; + using glm::u8mat3x4; + using glm::u8mat4x2; + using glm::u8mat4x3; + using glm::u8mat4x4; + using glm::lowp_u16mat2x2; + using glm::lowp_u16mat2x3; + using glm::lowp_u16mat2x4; + using glm::lowp_u16mat3x2; + using glm::lowp_u16mat3x3; + using glm::lowp_u16mat3x4; + using glm::lowp_u16mat4x2; + using glm::lowp_u16mat4x3; + using glm::lowp_u16mat4x4; + using glm::mediump_u16mat2x2; + using glm::mediump_u16mat2x3; + using glm::mediump_u16mat2x4; + using glm::mediump_u16mat3x2; + using glm::mediump_u16mat3x3; + using glm::mediump_u16mat3x4; + using glm::mediump_u16mat4x2; + using 
glm::mediump_u16mat4x3; + using glm::mediump_u16mat4x4; + using glm::highp_u16mat2x2; + using glm::highp_u16mat2x3; + using glm::highp_u16mat2x4; + using glm::highp_u16mat3x2; + using glm::highp_u16mat3x3; + using glm::highp_u16mat3x4; + using glm::highp_u16mat4x2; + using glm::highp_u16mat4x3; + using glm::highp_u16mat4x4; + using glm::u16mat2x2; + using glm::u16mat2x3; + using glm::u16mat2x4; + using glm::u16mat3x2; + using glm::u16mat3x3; + using glm::u16mat3x4; + using glm::u16mat4x2; + using glm::u16mat4x3; + using glm::u16mat4x4; + using glm::lowp_u32mat2x2; + using glm::lowp_u32mat2x3; + using glm::lowp_u32mat2x4; + using glm::lowp_u32mat3x2; + using glm::lowp_u32mat3x3; + using glm::lowp_u32mat3x4; + using glm::lowp_u32mat4x2; + using glm::lowp_u32mat4x3; + using glm::lowp_u32mat4x4; + using glm::mediump_u32mat2x2; + using glm::mediump_u32mat2x3; + using glm::mediump_u32mat2x4; + using glm::mediump_u32mat3x2; + using glm::mediump_u32mat3x3; + using glm::mediump_u32mat3x4; + using glm::mediump_u32mat4x2; + using glm::mediump_u32mat4x3; + using glm::mediump_u32mat4x4; + using glm::highp_u32mat2x2; + using glm::highp_u32mat2x3; + using glm::highp_u32mat2x4; + using glm::highp_u32mat3x2; + using glm::highp_u32mat3x3; + using glm::highp_u32mat3x4; + using glm::highp_u32mat4x2; + using glm::highp_u32mat4x3; + using glm::highp_u32mat4x4; + using glm::u32mat2x2; + using glm::u32mat2x3; + using glm::u32mat2x4; + using glm::u32mat3x2; + using glm::u32mat3x3; + using glm::u32mat3x4; + using glm::u32mat4x2; + using glm::u32mat4x3; + using glm::u32mat4x4; + using glm::lowp_u64mat2x2; + using glm::lowp_u64mat2x3; + using glm::lowp_u64mat2x4; + using glm::lowp_u64mat3x2; + using glm::lowp_u64mat3x3; + using glm::lowp_u64mat3x4; + using glm::lowp_u64mat4x2; + using glm::lowp_u64mat4x3; + using glm::lowp_u64mat4x4; + using glm::mediump_u64mat2x2; + using glm::mediump_u64mat2x3; + using glm::mediump_u64mat2x4; + using glm::mediump_u64mat3x2; + using glm::mediump_u64mat3x3; + using glm::mediump_u64mat3x4; + using glm::mediump_u64mat4x2; + using glm::mediump_u64mat4x3; + using glm::mediump_u64mat4x4; + using glm::highp_u64mat2x2; + using glm::highp_u64mat2x3; + using glm::highp_u64mat2x4; + using glm::highp_u64mat3x2; + using glm::highp_u64mat3x3; + using glm::highp_u64mat3x4; + using glm::highp_u64mat4x2; + using glm::highp_u64mat4x3; + using glm::highp_u64mat4x4; + using glm::u64mat2x2; + using glm::u64mat2x3; + using glm::u64mat2x4; + using glm::u64mat3x2; + using glm::u64mat3x3; + using glm::u64mat3x4; + using glm::u64mat4x2; + using glm::u64mat4x3; + using glm::u64mat4x4; + using glm::lowp_quat; + using glm::mediump_quat; + using glm::highp_quat; + using glm::quat; + using glm::lowp_fquat; + using glm::mediump_fquat; + using glm::highp_fquat; + using glm::fquat; + using glm::lowp_f32quat; + using glm::mediump_f32quat; + using glm::highp_f32quat; + using glm::f32quat; + using glm::lowp_dquat; + using glm::mediump_dquat; + using glm::highp_dquat; + using glm::dquat; + using glm::lowp_f64quat; + using glm::mediump_f64quat; + using glm::highp_f64quat; + using glm::f64quat; + + // Operators + using glm::operator+; + using glm::operator-; + using glm::operator*; + using glm::operator/; + using glm::operator%; + using glm::operator^; + using glm::operator&; + using glm::operator|; + using glm::operator~; + using glm::operator<<; + using glm::operator>>; + using glm::operator==; + using glm::operator!=; + using glm::operator&&; + using glm::operator||; + + // Core functions + using glm::abs; + using 
glm::acos; + using glm::acosh; + using glm::all; + using glm::any; + using glm::asin; + using glm::asinh; + using glm::atan; + using glm::atanh; + using glm::bitCount; + using glm::bitfieldExtract; + using glm::bitfieldInsert; + using glm::bitfieldReverse; + using glm::ceil; + using glm::clamp; + using glm::cos; + using glm::cosh; + using glm::cross; + using glm::degrees; + using glm::determinant; + using glm::distance; + using glm::dot; + using glm::equal; + using glm::exp; + using glm::exp2; + using glm::faceforward; + using glm::findLSB; + using glm::findMSB; + using glm::floatBitsToInt; + using glm::floatBitsToUint; + using glm::floor; + using glm::fma; + using glm::fract; + using glm::frexp; + using glm::greaterThan; + using glm::greaterThanEqual; + using glm::imulExtended; + using glm::intBitsToFloat; + using glm::inverse; + using glm::inversesqrt; + using glm::isinf; + using glm::isnan; + using glm::ldexp; + using glm::length; + using glm::lessThan; + using glm::lessThanEqual; + using glm::log; + using glm::log2; + using glm::matrixCompMult; + using glm::max; + using glm::min; + using glm::mix; + using glm::mod; + using glm::modf; + using glm::normalize; + using glm::notEqual; + using glm::not_; + using glm::outerProduct; + using glm::packDouble2x32; + using glm::packHalf2x16; + using glm::packSnorm2x16; + using glm::packSnorm4x8; + using glm::packUnorm2x16; + using glm::packUnorm4x8; + using glm::pow; + using glm::radians; + using glm::reflect; + using glm::refract; + using glm::round; + using glm::roundEven; + using glm::sign; + using glm::sin; + using glm::sinh; + using glm::smoothstep; + using glm::sqrt; + using glm::step; + using glm::tan; + using glm::tanh; + using glm::transpose; + using glm::trunc; + using glm::uaddCarry; + using glm::uintBitsToFloat; + using glm::umulExtended; + using glm::unpackDouble2x32; + using glm::unpackHalf2x16; + using glm::unpackSnorm2x16; + using glm::unpackSnorm4x8; + using glm::unpackUnorm2x16; + using glm::unpackUnorm4x8; + using glm::usubBorrow; + +# ifdef GLM_GTC_INLINE_NAMESPACE + inline +# endif + namespace gtc { +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE + using glm::aligned_highp_vec1; + using glm::aligned_mediump_vec1; + using glm::aligned_lowp_vec1; + using glm::aligned_highp_dvec1; + using glm::aligned_mediump_dvec1; + using glm::aligned_lowp_dvec1; + using glm::aligned_highp_ivec1; + using glm::aligned_mediump_ivec1; + using glm::aligned_lowp_ivec1; + using glm::aligned_highp_uvec1; + using glm::aligned_mediump_uvec1; + using glm::aligned_lowp_uvec1; + using glm::aligned_highp_bvec1; + using glm::aligned_mediump_bvec1; + using glm::aligned_lowp_bvec1; + using glm::packed_highp_vec1; + using glm::packed_mediump_vec1; + using glm::packed_lowp_vec1; + using glm::packed_highp_dvec1; + using glm::packed_mediump_dvec1; + using glm::packed_lowp_dvec1; + using glm::packed_highp_ivec1; + using glm::packed_mediump_ivec1; + using glm::packed_lowp_ivec1; + using glm::packed_highp_uvec1; + using glm::packed_mediump_uvec1; + using glm::packed_lowp_uvec1; + using glm::packed_highp_bvec1; + using glm::packed_mediump_bvec1; + using glm::packed_lowp_bvec1; + using glm::aligned_highp_vec2; + using glm::aligned_mediump_vec2; + using glm::aligned_lowp_vec2; + using glm::aligned_highp_dvec2; + using glm::aligned_mediump_dvec2; + using glm::aligned_lowp_dvec2; + using glm::aligned_highp_ivec2; + using glm::aligned_mediump_ivec2; + using glm::aligned_lowp_ivec2; + using glm::aligned_highp_uvec2; + using glm::aligned_mediump_uvec2; + using 
glm::aligned_lowp_uvec2; + using glm::aligned_highp_bvec2; + using glm::aligned_mediump_bvec2; + using glm::aligned_lowp_bvec2; + using glm::packed_highp_vec2; + using glm::packed_mediump_vec2; + using glm::packed_lowp_vec2; + using glm::packed_highp_dvec2; + using glm::packed_mediump_dvec2; + using glm::packed_lowp_dvec2; + using glm::packed_highp_ivec2; + using glm::packed_mediump_ivec2; + using glm::packed_lowp_ivec2; + using glm::packed_highp_uvec2; + using glm::packed_mediump_uvec2; + using glm::packed_lowp_uvec2; + using glm::packed_highp_bvec2; + using glm::packed_mediump_bvec2; + using glm::packed_lowp_bvec2; + using glm::aligned_highp_vec3; + using glm::aligned_mediump_vec3; + using glm::aligned_lowp_vec3; + using glm::aligned_highp_dvec3; + using glm::aligned_mediump_dvec3; + using glm::aligned_lowp_dvec3; + using glm::aligned_highp_ivec3; + using glm::aligned_mediump_ivec3; + using glm::aligned_lowp_ivec3; + using glm::aligned_highp_uvec3; + using glm::aligned_mediump_uvec3; + using glm::aligned_lowp_uvec3; + using glm::aligned_highp_bvec3; + using glm::aligned_mediump_bvec3; + using glm::aligned_lowp_bvec3; + using glm::packed_highp_vec3; + using glm::packed_mediump_vec3; + using glm::packed_lowp_vec3; + using glm::packed_highp_dvec3; + using glm::packed_mediump_dvec3; + using glm::packed_lowp_dvec3; + using glm::packed_highp_ivec3; + using glm::packed_mediump_ivec3; + using glm::packed_lowp_ivec3; + using glm::packed_highp_uvec3; + using glm::packed_mediump_uvec3; + using glm::packed_lowp_uvec3; + using glm::packed_highp_bvec3; + using glm::packed_mediump_bvec3; + using glm::packed_lowp_bvec3; + using glm::aligned_highp_vec4; + using glm::aligned_mediump_vec4; + using glm::aligned_lowp_vec4; + using glm::aligned_highp_dvec4; + using glm::aligned_mediump_dvec4; + using glm::aligned_lowp_dvec4; + using glm::aligned_highp_ivec4; + using glm::aligned_mediump_ivec4; + using glm::aligned_lowp_ivec4; + using glm::aligned_highp_uvec4; + using glm::aligned_mediump_uvec4; + using glm::aligned_lowp_uvec4; + using glm::aligned_highp_bvec4; + using glm::aligned_mediump_bvec4; + using glm::aligned_lowp_bvec4; + using glm::packed_highp_vec4; + using glm::packed_mediump_vec4; + using glm::packed_lowp_vec4; + using glm::packed_highp_dvec4; + using glm::packed_mediump_dvec4; + using glm::packed_lowp_dvec4; + using glm::packed_highp_ivec4; + using glm::packed_mediump_ivec4; + using glm::packed_lowp_ivec4; + using glm::packed_highp_uvec4; + using glm::packed_mediump_uvec4; + using glm::packed_lowp_uvec4; + using glm::packed_highp_bvec4; + using glm::packed_mediump_bvec4; + using glm::packed_lowp_bvec4; + using glm::aligned_highp_mat2; + using glm::aligned_mediump_mat2; + using glm::aligned_lowp_mat2; + using glm::aligned_highp_dmat2; + using glm::aligned_mediump_dmat2; + using glm::aligned_lowp_dmat2; + using glm::packed_highp_mat2; + using glm::packed_mediump_mat2; + using glm::packed_lowp_mat2; + using glm::packed_highp_dmat2; + using glm::packed_mediump_dmat2; + using glm::packed_lowp_dmat2; + using glm::aligned_highp_mat3; + using glm::aligned_mediump_mat3; + using glm::aligned_lowp_mat3; + using glm::aligned_highp_dmat3; + using glm::aligned_mediump_dmat3; + using glm::aligned_lowp_dmat3; + using glm::packed_highp_mat3; + using glm::packed_mediump_mat3; + using glm::packed_lowp_mat3; + using glm::packed_highp_dmat3; + using glm::packed_mediump_dmat3; + using glm::packed_lowp_dmat3; + using glm::aligned_highp_mat4; + using glm::aligned_mediump_mat4; + using glm::aligned_lowp_mat4; + using 
glm::aligned_highp_dmat4; + using glm::aligned_mediump_dmat4; + using glm::aligned_lowp_dmat4; + using glm::packed_highp_mat4; + using glm::packed_mediump_mat4; + using glm::packed_lowp_mat4; + using glm::packed_highp_dmat4; + using glm::packed_mediump_dmat4; + using glm::packed_lowp_dmat4; + using glm::aligned_highp_mat2x2; + using glm::aligned_mediump_mat2x2; + using glm::aligned_lowp_mat2x2; + using glm::aligned_highp_dmat2x2; + using glm::aligned_mediump_dmat2x2; + using glm::aligned_lowp_dmat2x2; + using glm::packed_highp_mat2x2; + using glm::packed_mediump_mat2x2; + using glm::packed_lowp_mat2x2; + using glm::packed_highp_dmat2x2; + using glm::packed_mediump_dmat2x2; + using glm::packed_lowp_dmat2x2; + using glm::aligned_highp_mat2x3; + using glm::aligned_mediump_mat2x3; + using glm::aligned_lowp_mat2x3; + using glm::aligned_highp_dmat2x3; + using glm::aligned_mediump_dmat2x3; + using glm::aligned_lowp_dmat2x3; + using glm::packed_highp_mat2x3; + using glm::packed_mediump_mat2x3; + using glm::packed_lowp_mat2x3; + using glm::packed_highp_dmat2x3; + using glm::packed_mediump_dmat2x3; + using glm::packed_lowp_dmat2x3; + using glm::aligned_highp_mat2x4; + using glm::aligned_mediump_mat2x4; + using glm::aligned_lowp_mat2x4; + using glm::aligned_highp_dmat2x4; + using glm::aligned_mediump_dmat2x4; + using glm::aligned_lowp_dmat2x4; + using glm::packed_highp_mat2x4; + using glm::packed_mediump_mat2x4; + using glm::packed_lowp_mat2x4; + using glm::packed_highp_dmat2x4; + using glm::packed_mediump_dmat2x4; + using glm::packed_lowp_dmat2x4; + using glm::aligned_highp_mat3x2; + using glm::aligned_mediump_mat3x2; + using glm::aligned_lowp_mat3x2; + using glm::aligned_highp_dmat3x2; + using glm::aligned_mediump_dmat3x2; + using glm::aligned_lowp_dmat3x2; + using glm::packed_highp_mat3x2; + using glm::packed_mediump_mat3x2; + using glm::packed_lowp_mat3x2; + using glm::packed_highp_dmat3x2; + using glm::packed_mediump_dmat3x2; + using glm::packed_lowp_dmat3x2; + using glm::aligned_highp_mat3x3; + using glm::aligned_mediump_mat3x3; + using glm::aligned_lowp_mat3x3; + using glm::aligned_highp_dmat3x3; + using glm::aligned_mediump_dmat3x3; + using glm::aligned_lowp_dmat3x3; + using glm::packed_highp_mat3x3; + using glm::packed_mediump_mat3x3; + using glm::packed_lowp_mat3x3; + using glm::packed_highp_dmat3x3; + using glm::packed_mediump_dmat3x3; + using glm::packed_lowp_dmat3x3; + using glm::aligned_highp_mat3x4; + using glm::aligned_mediump_mat3x4; + using glm::aligned_lowp_mat3x4; + using glm::aligned_highp_dmat3x4; + using glm::aligned_mediump_dmat3x4; + using glm::aligned_lowp_dmat3x4; + using glm::packed_highp_mat3x4; + using glm::packed_mediump_mat3x4; + using glm::packed_lowp_mat3x4; + using glm::packed_highp_dmat3x4; + using glm::packed_mediump_dmat3x4; + using glm::packed_lowp_dmat3x4; + using glm::aligned_highp_mat4x2; + using glm::aligned_mediump_mat4x2; + using glm::aligned_lowp_mat4x2; + using glm::aligned_highp_dmat4x2; + using glm::aligned_mediump_dmat4x2; + using glm::aligned_lowp_dmat4x2; + using glm::packed_highp_mat4x2; + using glm::packed_mediump_mat4x2; + using glm::packed_lowp_mat4x2; + using glm::packed_highp_dmat4x2; + using glm::packed_mediump_dmat4x2; + using glm::packed_lowp_dmat4x2; + using glm::aligned_highp_mat4x3; + using glm::aligned_mediump_mat4x3; + using glm::aligned_lowp_mat4x3; + using glm::aligned_highp_dmat4x3; + using glm::aligned_mediump_dmat4x3; + using glm::aligned_lowp_dmat4x3; + using glm::packed_highp_mat4x3; + using glm::packed_mediump_mat4x3; + using 
glm::packed_lowp_mat4x3; + using glm::packed_highp_dmat4x3; + using glm::packed_mediump_dmat4x3; + using glm::packed_lowp_dmat4x3; + using glm::aligned_highp_mat4x4; + using glm::aligned_mediump_mat4x4; + using glm::aligned_lowp_mat4x4; + using glm::aligned_highp_dmat4x4; + using glm::aligned_mediump_dmat4x4; + using glm::aligned_lowp_dmat4x4; + using glm::packed_highp_mat4x4; + using glm::packed_mediump_mat4x4; + using glm::packed_lowp_mat4x4; + using glm::packed_highp_dmat4x4; + using glm::packed_mediump_dmat4x4; + using glm::packed_lowp_dmat4x4; +# if(defined(GLM_PRECISION_LOWP_FLOAT)) + using glm::aligned_vec1; + using glm::aligned_vec2; + using glm::aligned_vec3; + using glm::aligned_vec4; + using glm::packed_vec1; + using glm::packed_vec2; + using glm::packed_vec3; + using glm::packed_vec4; + using glm::aligned_mat2; + using glm::aligned_mat3; + using glm::aligned_mat4; + using glm::packed_mat2; + using glm::packed_mat3; + using glm::packed_mat4; + using glm::aligned_mat2x2; + using glm::aligned_mat2x3; + using glm::aligned_mat2x4; + using glm::aligned_mat3x2; + using glm::aligned_mat3x3; + using glm::aligned_mat3x4; + using glm::aligned_mat4x2; + using glm::aligned_mat4x3; + using glm::aligned_mat4x4; + using glm::packed_mat2x2; + using glm::packed_mat2x3; + using glm::packed_mat2x4; + using glm::packed_mat3x2; + using glm::packed_mat3x3; + using glm::packed_mat3x4; + using glm::packed_mat4x2; + using glm::packed_mat4x3; + using glm::packed_mat4x4; +# elif(defined(GLM_PRECISION_MEDIUMP_FLOAT)) + using glm::aligned_vec1; + using glm::aligned_vec2; + using glm::aligned_vec3; + using glm::aligned_vec4; + using glm::packed_vec1; + using glm::packed_vec2; + using glm::packed_vec3; + using glm::packed_vec4; + using glm::aligned_mat2; + using glm::aligned_mat3; + using glm::aligned_mat4; + using glm::packed_mat2; + using glm::packed_mat3; + using glm::packed_mat4; + using glm::aligned_mat2x2; + using glm::aligned_mat2x3; + using glm::aligned_mat2x4; + using glm::aligned_mat3x2; + using glm::aligned_mat3x3; + using glm::aligned_mat3x4; + using glm::aligned_mat4x2; + using glm::aligned_mat4x3; + using glm::aligned_mat4x4; + using glm::packed_mat2x2; + using glm::packed_mat2x3; + using glm::packed_mat2x4; + using glm::packed_mat3x2; + using glm::packed_mat3x3; + using glm::packed_mat3x4; + using glm::packed_mat4x2; + using glm::packed_mat4x3; + using glm::packed_mat4x4; +# else //defined(GLM_PRECISION_HIGHP_FLOAT) + using glm::aligned_vec1; + using glm::aligned_vec2; + using glm::aligned_vec3; + using glm::aligned_vec4; + using glm::packed_vec1; + using glm::packed_vec2; + using glm::packed_vec3; + using glm::packed_vec4; + using glm::aligned_mat2; + using glm::aligned_mat3; + using glm::aligned_mat4; + using glm::packed_mat2; + using glm::packed_mat3; + using glm::packed_mat4; + using glm::aligned_mat2x2; + using glm::aligned_mat2x3; + using glm::aligned_mat2x4; + using glm::aligned_mat3x2; + using glm::aligned_mat3x3; + using glm::aligned_mat3x4; + using glm::aligned_mat4x2; + using glm::aligned_mat4x3; + using glm::aligned_mat4x4; + using glm::packed_mat2x2; + using glm::packed_mat2x3; + using glm::packed_mat2x4; + using glm::packed_mat3x2; + using glm::packed_mat3x3; + using glm::packed_mat3x4; + using glm::packed_mat4x2; + using glm::packed_mat4x3; + using glm::packed_mat4x4; +# endif//GLM_PRECISION +# if(defined(GLM_PRECISION_LOWP_DOUBLE)) + using glm::aligned_dvec1; + using glm::aligned_dvec2; + using glm::aligned_dvec3; + using glm::aligned_dvec4; + using glm::packed_dvec1; + using 
glm::packed_dvec2; + using glm::packed_dvec3; + using glm::packed_dvec4; + using glm::aligned_dmat2; + using glm::aligned_dmat3; + using glm::aligned_dmat4; + using glm::packed_dmat2; + using glm::packed_dmat3; + using glm::packed_dmat4; + using glm::aligned_dmat2x2; + using glm::aligned_dmat2x3; + using glm::aligned_dmat2x4; + using glm::aligned_dmat3x2; + using glm::aligned_dmat3x3; + using glm::aligned_dmat3x4; + using glm::aligned_dmat4x2; + using glm::aligned_dmat4x3; + using glm::aligned_dmat4x4; + using glm::packed_dmat2x2; + using glm::packed_dmat2x3; + using glm::packed_dmat2x4; + using glm::packed_dmat3x2; + using glm::packed_dmat3x3; + using glm::packed_dmat3x4; + using glm::packed_dmat4x2; + using glm::packed_dmat4x3; + using glm::packed_dmat4x4; +# elif(defined(GLM_PRECISION_MEDIUMP_DOUBLE)) + using glm::aligned_dvec1; + using glm::aligned_dvec2; + using glm::aligned_dvec3; + using glm::aligned_dvec4; + using glm::packed_dvec1; + using glm::packed_dvec2; + using glm::packed_dvec3; + using glm::packed_dvec4; + using glm::aligned_dmat2; + using glm::aligned_dmat3; + using glm::aligned_dmat4; + using glm::packed_dmat2; + using glm::packed_dmat3; + using glm::packed_dmat4; + using glm::aligned_dmat2x2; + using glm::aligned_dmat2x3; + using glm::aligned_dmat2x4; + using glm::aligned_dmat3x2; + using glm::aligned_dmat3x3; + using glm::aligned_dmat3x4; + using glm::aligned_dmat4x2; + using glm::aligned_dmat4x3; + using glm::aligned_dmat4x4; + using glm::packed_dmat2x2; + using glm::packed_dmat2x3; + using glm::packed_dmat2x4; + using glm::packed_dmat3x2; + using glm::packed_dmat3x3; + using glm::packed_dmat3x4; + using glm::packed_dmat4x2; + using glm::packed_dmat4x3; + using glm::packed_dmat4x4; +# else //defined(GLM_PRECISION_HIGHP_DOUBLE) + using glm::aligned_dvec1; + using glm::aligned_dvec2; + using glm::aligned_dvec3; + using glm::aligned_dvec4; + using glm::packed_dvec1; + using glm::packed_dvec2; + using glm::packed_dvec3; + using glm::packed_dvec4; + using glm::aligned_dmat2; + using glm::aligned_dmat3; + using glm::aligned_dmat4; + using glm::packed_dmat2; + using glm::packed_dmat3; + using glm::packed_dmat4; + using glm::aligned_dmat2x2; + using glm::aligned_dmat2x3; + using glm::aligned_dmat2x4; + using glm::aligned_dmat3x2; + using glm::aligned_dmat3x3; + using glm::aligned_dmat3x4; + using glm::aligned_dmat4x2; + using glm::aligned_dmat4x3; + using glm::aligned_dmat4x4; + using glm::packed_dmat2x2; + using glm::packed_dmat2x3; + using glm::packed_dmat2x4; + using glm::packed_dmat3x2; + using glm::packed_dmat3x3; + using glm::packed_dmat3x4; + using glm::packed_dmat4x2; + using glm::packed_dmat4x3; + using glm::packed_dmat4x4; +# endif//GLM_PRECISION +# if(defined(GLM_PRECISION_LOWP_INT)) + using glm::aligned_ivec1; + using glm::aligned_ivec2; + using glm::aligned_ivec3; + using glm::aligned_ivec4; +# elif(defined(GLM_PRECISION_MEDIUMP_INT)) + using glm::aligned_ivec1; + using glm::aligned_ivec2; + using glm::aligned_ivec3; + using glm::aligned_ivec4; +# else //defined(GLM_PRECISION_HIGHP_INT) + using glm::aligned_ivec1; + using glm::aligned_ivec2; + using glm::aligned_ivec3; + using glm::aligned_ivec4; + using glm::packed_ivec1; + using glm::packed_ivec2; + using glm::packed_ivec3; + using glm::packed_ivec4; +# endif//GLM_PRECISION +# if(defined(GLM_PRECISION_LOWP_UINT)) + using glm::aligned_uvec1; + using glm::aligned_uvec2; + using glm::aligned_uvec3; + using glm::aligned_uvec4; +# elif(defined(GLM_PRECISION_MEDIUMP_UINT)) + using glm::aligned_uvec1; + using 
glm::aligned_uvec2; + using glm::aligned_uvec3; + using glm::aligned_uvec4; +# else //defined(GLM_PRECISION_HIGHP_UINT) + using glm::aligned_uvec1; + using glm::aligned_uvec2; + using glm::aligned_uvec3; + using glm::aligned_uvec4; + using glm::packed_uvec1; + using glm::packed_uvec2; + using glm::packed_uvec3; + using glm::packed_uvec4; +# endif//GLM_PRECISION +# if(defined(GLM_PRECISION_LOWP_BOOL)) + using glm::aligned_bvec1; + using glm::aligned_bvec2; + using glm::aligned_bvec3; + using glm::aligned_bvec4; +# elif(defined(GLM_PRECISION_MEDIUMP_BOOL)) + using glm::aligned_bvec1; + using glm::aligned_bvec2; + using glm::aligned_bvec3; + using glm::aligned_bvec4; +# else //defined(GLM_PRECISION_HIGHP_BOOL) + using glm::aligned_bvec1; + using glm::aligned_bvec2; + using glm::aligned_bvec3; + using glm::aligned_bvec4; + using glm::packed_bvec1; + using glm::packed_bvec2; + using glm::packed_bvec3; + using glm::packed_bvec4; +# endif//GLM_PRECISION +# endif + + + using glm::abs; + using glm::acos; + using glm::acosh; + using glm::acot; + using glm::acoth; + using glm::acsc; + using glm::acsch; + using glm::affineInverse; + using glm::all; + using glm::angle; + using glm::angleAxis; + using glm::any; + using glm::asec; + using glm::asech; + using glm::asin; + using glm::asinh; + using glm::atan; + using glm::atanh; + using glm::axis; + using glm::ballRand; + using glm::bitCount; + using glm::bitfieldDeinterleave; + using glm::bitfieldExtract; + using glm::bitfieldFillOne; + using glm::bitfieldFillZero; + using glm::bitfieldInsert; + using glm::bitfieldInterleave; + using glm::bitfieldReverse; + using glm::bitfieldRotateLeft; + using glm::bitfieldRotateRight; + using glm::ceil; + using glm::ceilMultiple; + using glm::ceilPowerOfTwo; + using glm::circularRand; + using glm::clamp; + using glm::column; + using glm::conjugate; + using glm::convertLinearToSRGB; + using glm::convertSRGBToLinear; + using glm::cos; + using glm::cos_one_over_two; + using glm::cosh; + using glm::cot; + using glm::coth; + using glm::cross; + using glm::csc; + using glm::csch; + using glm::degrees; + using glm::determinant; + using glm::diskRand; + using glm::distance; + using glm::dot; + using glm::e; + using glm::epsilon; + using glm::epsilonEqual; + using glm::epsilonNotEqual; + using glm::equal; + using glm::euler; + using glm::eulerAngles; + using glm::exp; + using glm::exp2; + using glm::faceforward; + using glm::fclamp; + using glm::findLSB; + using glm::findMSB; + using glm::floatBitsToInt; + using glm::floatBitsToUint; + using glm::float_distance; + using glm::floor; + using glm::floorMultiple; + using glm::floorPowerOfTwo; + using glm::fma; + using glm::fmax; + using glm::fmin; + using glm::four_over_pi; + using glm::fract; + using glm::frexp; + using glm::frustum; + using glm::frustumLH; + using glm::frustumLH_NO; + using glm::frustumLH_ZO; + using glm::frustumNO; + using glm::frustumRH; + using glm::frustumRH_NO; + using glm::frustumRH_ZO; + using glm::frustumZO; + using glm::gaussRand; + using glm::golden_ratio; + using glm::greaterThan; + using glm::greaterThanEqual; + using glm::half_pi; + using glm::identity; + using glm::imulExtended; + using glm::infinitePerspective; + using glm::infinitePerspectiveLH; + using glm::infinitePerspectiveRH; + using glm::intBitsToFloat; + using glm::inverse; + using glm::inverseTranspose; + using glm::inversesqrt; + using glm::iround; + using glm::isinf; + using glm::isnan; + using glm::ldexp; + using glm::length; + using glm::lerp; + using glm::lessThan; + using 
glm::lessThanEqual; + using glm::linearRand; + using glm::ln_ln_two; + using glm::ln_ten; + using glm::ln_two; + using glm::log; + using glm::log2; + using glm::lookAt; + using glm::lookAtLH; + using glm::lookAtRH; + using glm::make_mat2; + using glm::make_mat2x2; + using glm::make_mat2x3; + using glm::make_mat2x4; + using glm::make_mat3; + using glm::make_mat3x2; + using glm::make_mat3x3; + using glm::make_mat3x4; + using glm::make_mat4; + using glm::make_mat4x2; + using glm::make_mat4x3; + using glm::make_mat4x4; + using glm::make_quat; + using glm::make_vec1; + using glm::make_vec2; + using glm::make_vec3; + using glm::make_vec4; + using glm::mask; + using glm::mat3_cast; + using glm::mat4_cast; + using glm::matrixCompMult; + using glm::max; + using glm::min; + using glm::mirrorClamp; + using glm::mirrorRepeat; + using glm::mix; + using glm::mod; + using glm::modf; + using glm::next_float; + using glm::normalize; + using glm::notEqual; + using glm::not_; + using glm::one; + using glm::one_over_pi; + using glm::one_over_root_two; + using glm::one_over_two_pi; + using glm::ortho; + using glm::orthoLH; + using glm::orthoLH_NO; + using glm::orthoLH_ZO; + using glm::orthoNO; + using glm::orthoRH; + using glm::orthoRH_NO; + using glm::orthoRH_ZO; + using glm::orthoZO; + using glm::outerProduct; + using glm::packF2x11_1x10; + using glm::packF3x9_E1x5; + using glm::packHalf; + using glm::packHalf1x16; + using glm::packHalf4x16; + using glm::packI3x10_1x2; + using glm::packInt2x16; + using glm::packInt2x32; + using glm::packInt2x8; + using glm::packInt4x16; + using glm::packInt4x8; + using glm::packRGBM; + using glm::packSnorm; + using glm::packSnorm1x16; + using glm::packSnorm1x8; + using glm::packSnorm2x8; + using glm::packSnorm3x10_1x2; + using glm::packSnorm4x16; + using glm::packU3x10_1x2; + using glm::packUint2x16; + using glm::packUint2x32; + using glm::packUint2x8; + using glm::packUint4x16; + using glm::packUint4x8; + using glm::packUnorm; + using glm::packUnorm1x16; + using glm::packUnorm1x5_1x6_1x5; + using glm::packUnorm1x8; + using glm::packUnorm2x3_1x2; + using glm::packUnorm2x4; + using glm::packUnorm2x8; + using glm::packUnorm3x10_1x2; + using glm::packUnorm3x5_1x1; + using glm::packUnorm4x16; + using glm::packUnorm4x4; + using glm::perlin; + using glm::perspective; + using glm::perspectiveFov; + using glm::perspectiveFovLH; + using glm::perspectiveFovLH_NO; + using glm::perspectiveFovLH_ZO; + using glm::perspectiveFovNO; + using glm::perspectiveFovRH; + using glm::perspectiveFovRH_NO; + using glm::perspectiveFovRH_ZO; + using glm::perspectiveFovZO; + using glm::perspectiveLH; + using glm::perspectiveLH_NO; + using glm::perspectiveLH_ZO; + using glm::perspectiveNO; + using glm::perspectiveRH; + using glm::perspectiveRH_NO; + using glm::perspectiveRH_ZO; + using glm::perspectiveZO; + using glm::pi; + using glm::pickMatrix; + using glm::pitch; + using glm::pow; + using glm::prev_float; + using glm::project; + using glm::projectNO; + using glm::projectZO; + using glm::quarter_pi; + using glm::quatLookAt; + using glm::quatLookAtLH; + using glm::quatLookAtRH; + using glm::quat_cast; + using glm::radians; + using glm::reflect; + using glm::refract; + using glm::repeat; + using glm::roll; + using glm::root_five; + using glm::root_half_pi; + using glm::root_ln_four; + using glm::root_pi; + using glm::root_three; + using glm::root_two; + using glm::root_two_pi; + using glm::rotate; + using glm::round; + using glm::roundEven; + using glm::roundMultiple; + using glm::roundPowerOfTwo; + 
using glm::row; + using glm::scale; + using glm::sec; + using glm::sech; + using glm::sign; + using glm::simplex; + using glm::sin; + using glm::sinh; + using glm::slerp; + using glm::smoothstep; + using glm::sphericalRand; + using glm::sqrt; + using glm::step; + using glm::tan; + using glm::tanh; + using glm::third; + using glm::three_over_two_pi; + using glm::translate; + using glm::transpose; + using glm::trunc; + using glm::tweakedInfinitePerspective; + using glm::two_over_pi; + using glm::two_over_root_pi; + using glm::two_pi; + using glm::two_thirds; + using glm::uaddCarry; + using glm::uintBitsToFloat; + using glm::umulExtended; + using glm::unProject; + using glm::unProjectNO; + using glm::unProjectZO; + using glm::unpackF2x11_1x10; + using glm::unpackF3x9_E1x5; + using glm::unpackHalf; + using glm::unpackHalf1x16; + using glm::unpackHalf4x16; + using glm::unpackI3x10_1x2; + using glm::unpackInt2x16; + using glm::unpackInt2x32; + using glm::unpackInt2x8; + using glm::unpackInt4x16; + using glm::unpackInt4x8; + using glm::unpackRGBM; + using glm::unpackSnorm; + using glm::unpackSnorm1x16; + using glm::unpackSnorm1x8; + using glm::unpackSnorm2x8; + using glm::unpackSnorm3x10_1x2; + using glm::unpackSnorm4x16; + using glm::unpackU3x10_1x2; + using glm::unpackUint2x16; + using glm::unpackUint2x32; + using glm::unpackUint2x8; + using glm::unpackUint4x16; + using glm::unpackUint4x8; + using glm::unpackUnorm; + using glm::unpackUnorm1x16; + using glm::unpackUnorm1x5_1x6_1x5; + using glm::unpackUnorm1x8; + using glm::unpackUnorm2x3_1x2; + using glm::unpackUnorm2x4; + using glm::unpackUnorm2x8; + using glm::unpackUnorm3x10_1x2; + using glm::unpackUnorm3x5_1x1; + using glm::unpackUnorm4x16; + using glm::unpackUnorm4x4; + using glm::uround; + using glm::usubBorrow; + using glm::value_ptr; + using glm::yaw; + using glm::zero; + } + +# ifdef GLM_EXT_INLINE_NAMESPACE + inline +# endif + namespace ext { + using glm::abs; + using glm::acos; + using glm::acosh; + using glm::acot; + using glm::acoth; + using glm::acsc; + using glm::acsch; + using glm::all; + using glm::angle; + using glm::angleAxis; + using glm::any; + using glm::asec; + using glm::asech; + using glm::asin; + using glm::asinh; + using glm::atan; + using glm::atanh; + using glm::axis; + using glm::ceil; + using glm::clamp; + using glm::conjugate; + using glm::cos; + using glm::cos_one_over_two; + using glm::cosh; + using glm::cot; + using glm::coth; + using glm::cross; + using glm::csc; + using glm::csch; + using glm::degrees; + using glm::determinant; + using glm::distance; + using glm::dot; + using glm::e; + using glm::epsilon; + using glm::equal; + using glm::euler; + using glm::exp; + using glm::exp2; + using glm::faceforward; + using glm::fclamp; + using glm::findNSB; + using glm::floatBitsToInt; + using glm::floatBitsToUint; + using glm::floatDistance; + using glm::floor; + using glm::fma; + using glm::fmax; + using glm::fmin; + using glm::four_over_pi; + using glm::fract; + using glm::frexp; + using glm::frustum; + using glm::frustumLH; + using glm::frustumLH_NO; + using glm::frustumLH_ZO; + using glm::frustumNO; + using glm::frustumRH; + using glm::frustumRH_NO; + using glm::frustumRH_ZO; + using glm::frustumZO; + using glm::golden_ratio; + using glm::greaterThan; + using glm::greaterThanEqual; + using glm::half_pi; + using glm::identity; + using glm::infinitePerspective; + using glm::infinitePerspectiveLH; + using glm::infinitePerspectiveRH; + using glm::intBitsToFloat; + using glm::inverse; + using glm::inversesqrt; + using 
glm::iround; + using glm::isMultiple; + using glm::isPowerOfTwo; + using glm::isinf; + using glm::isnan; + using glm::ldexp; + using glm::length; + using glm::lerp; + using glm::lessThan; + using glm::lessThanEqual; + using glm::ln_ln_two; + using glm::ln_ten; + using glm::ln_two; + using glm::log; + using glm::log2; + using glm::lookAt; + using glm::lookAtLH; + using glm::lookAtRH; + using glm::matrixCompMult; + using glm::max; + using glm::min; + using glm::mirrorClamp; + using glm::mirrorRepeat; + using glm::mix; + using glm::mod; + using glm::modf; + using glm::nextFloat; + using glm::nextMultiple; + using glm::nextPowerOfTwo; + using glm::normalize; + using glm::notEqual; + using glm::not_; + using glm::one; + using glm::one_over_pi; + using glm::one_over_root_two; + using glm::one_over_two_pi; + using glm::ortho; + using glm::orthoLH; + using glm::orthoLH_NO; + using glm::orthoLH_ZO; + using glm::orthoNO; + using glm::orthoRH; + using glm::orthoRH_NO; + using glm::orthoRH_ZO; + using glm::orthoZO; + using glm::outerProduct; + using glm::perspective; + using glm::perspectiveFov; + using glm::perspectiveFovLH; + using glm::perspectiveFovLH_NO; + using glm::perspectiveFovLH_ZO; + using glm::perspectiveFovNO; + using glm::perspectiveFovRH; + using glm::perspectiveFovRH_NO; + using glm::perspectiveFovRH_ZO; + using glm::perspectiveFovZO; + using glm::perspectiveLH; + using glm::perspectiveLH_NO; + using glm::perspectiveLH_ZO; + using glm::perspectiveNO; + using glm::perspectiveRH; + using glm::perspectiveRH_NO; + using glm::perspectiveRH_ZO; + using glm::perspectiveZO; + using glm::pi; + using glm::pickMatrix; + using glm::pow; + using glm::prevFloat; + using glm::prevMultiple; + using glm::prevPowerOfTwo; + using glm::project; + using glm::projectNO; + using glm::projectZO; + using glm::quarter_pi; + using glm::radians; + using glm::reflect; + using glm::refract; + using glm::repeat; + using glm::root_five; + using glm::root_half_pi; + using glm::root_ln_four; + using glm::root_pi; + using glm::root_three; + using glm::root_two; + using glm::root_two_pi; + using glm::rotate; + using glm::round; + using glm::roundEven; + using glm::scale; + using glm::sec; + using glm::sech; + using glm::sign; + using glm::sin; + using glm::sinh; + using glm::slerp; + using glm::smoothstep; + using glm::sqrt; + using glm::step; + using glm::tan; + using glm::tanh; + using glm::third; + using glm::three_over_two_pi; + using glm::translate; + using glm::transpose; + using glm::trunc; + using glm::tweakedInfinitePerspective; + using glm::two_over_pi; + using glm::two_over_root_pi; + using glm::two_pi; + using glm::two_thirds; + using glm::uintBitsToFloat; + using glm::unProject; + using glm::unProjectNO; + using glm::unProjectZO; + using glm::uround; + using glm::zero; + } + +# ifdef GLM_ENABLE_EXPERIMENTAL +# ifdef GLM_GTX_INLINE_NAMESPACE + inline +# endif + namespace gtx { + using glm::io::order_type; + using glm::io::format_punct; + using glm::io::basic_state_saver; + using glm::io::basic_format_saver; + using glm::io::precision; + using glm::io::width; + using glm::io::delimeter; + using glm::io::order; + using glm::io::get_facet; + using glm::io::formatted; + using glm::io::unformatted; + using glm::io::operator<<; + using glm::operator<<; + using glm::tdualquat; + +# if !((GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)) + using glm::to_string; +# endif +# if GLM_HAS_TEMPLATE_ALIASES + using glm::operator*; + using glm::operator/; +# endif +# if GLM_HAS_RANGE_FOR + using 
glm::components; + using glm::begin; + using glm::end; +# endif + + using glm::abs; + using glm::acos; + using glm::acosh; + using glm::adjugate; + using glm::all; + using glm::angle; + using glm::angleAxis; + using glm::any; + using glm::areCollinear; + using glm::areOrthogonal; + using glm::areOrthonormal; + using glm::asin; + using glm::asinh; + using glm::associatedMax; + using glm::associatedMin; + using glm::atan; + using glm::atanh; + using glm::axis; + using glm::axisAngle; + using glm::axisAngleMatrix; + using glm::backEaseIn; + using glm::backEaseInOut; + using glm::backEaseOut; + using glm::bitCount; + using glm::bitfieldDeinterleave; + using glm::bitfieldExtract; + using glm::bitfieldFillOne; + using glm::bitfieldFillZero; + using glm::bitfieldInsert; + using glm::bitfieldInterleave; + using glm::bitfieldReverse; + using glm::bitfieldRotateLeft; + using glm::bitfieldRotateRight; + using glm::bounceEaseIn; + using glm::bounceEaseInOut; + using glm::bounceEaseOut; + using glm::catmullRom; + using glm::ceil; + using glm::circularEaseIn; + using glm::circularEaseInOut; + using glm::circularEaseOut; + using glm::clamp; + using glm::closeBounded; + using glm::closestPointOnLine; + using glm::colMajor2; + using glm::colMajor3; + using glm::colMajor4; + using glm::compAdd; + using glm::compMax; + using glm::compMin; + using glm::compMul; + using glm::compNormalize; + using glm::compScale; + using glm::computeCovarianceMatrix; + using glm::conjugate; + using glm::convertD65XYZToD50XYZ; + using glm::convertD65XYZToLinearSRGB; + using glm::convertLinearSRGBToD50XYZ; + using glm::convertLinearSRGBToD65XYZ; + using glm::cos; + using glm::cos_one_over_two; + using glm::cosh; + using glm::cross; + using glm::cubic; + using glm::cubicEaseIn; + using glm::cubicEaseInOut; + using glm::cubicEaseOut; + using glm::decompose; + using glm::degrees; + using glm::derivedEulerAngleX; + using glm::derivedEulerAngleY; + using glm::derivedEulerAngleZ; + using glm::determinant; + using glm::diagonal2x2; + using glm::diagonal2x3; + using glm::diagonal2x4; + using glm::diagonal3x2; + using glm::diagonal3x3; + using glm::diagonal3x4; + using glm::diagonal4x2; + using glm::diagonal4x3; + using glm::diagonal4x4; + using glm::distance; + using glm::distance2; + using glm::dot; + using glm::dual_quat_identity; + using glm::dualquat_cast; + using glm::e; + using glm::elasticEaseIn; + using glm::elasticEaseInOut; + using glm::elasticEaseOut; + using glm::epsilon; + using glm::epsilonEqual; + using glm::epsilonNotEqual; + using glm::equal; + using glm::euclidean; + using glm::euler; + using glm::eulerAngleX; + using glm::eulerAngleXY; + using glm::eulerAngleXYX; + using glm::eulerAngleXYZ; + using glm::eulerAngleXZ; + using glm::eulerAngleXZX; + using glm::eulerAngleXZY; + using glm::eulerAngleY; + using glm::eulerAngleYX; + using glm::eulerAngleYXY; + using glm::eulerAngleYXZ; + using glm::eulerAngleYZ; + using glm::eulerAngleYZX; + using glm::eulerAngleYZY; + using glm::eulerAngleZ; + using glm::eulerAngleZX; + using glm::eulerAngleZXY; + using glm::eulerAngleZXZ; + using glm::eulerAngleZY; + using glm::eulerAngleZYX; + using glm::eulerAngleZYZ; + using glm::eulerAngles; + using glm::exp; + using glm::exp2; + using glm::exponentialEaseIn; + using glm::exponentialEaseInOut; + using glm::exponentialEaseOut; + using glm::extend; + using glm::extractEulerAngleXYX; + using glm::extractEulerAngleXYZ; + using glm::extractEulerAngleXZX; + using glm::extractEulerAngleXZY; + using glm::extractEulerAngleYXY; + using 
glm::extractEulerAngleYXZ; + using glm::extractEulerAngleYZX; + using glm::extractEulerAngleYZY; + using glm::extractEulerAngleZXY; + using glm::extractEulerAngleZXZ; + using glm::extractEulerAngleZYX; + using glm::extractEulerAngleZYZ; + using glm::extractMatrixRotation; + using glm::extractRealComponent; + using glm::faceforward; + using glm::factorial; + using glm::fastAcos; + using glm::fastAsin; + using glm::fastAtan; + using glm::fastCos; + using glm::fastDistance; + using glm::fastExp; + using glm::fastExp2; + using glm::fastInverseSqrt; + using glm::fastLength; + using glm::fastLog; + using glm::fastLog2; + using glm::fastMix; + using glm::fastNormalize; + using glm::fastNormalizeDot; + using glm::fastPow; + using glm::fastSin; + using glm::fastSqrt; + using glm::fastTan; + using glm::fclamp; + using glm::findLSB; + using glm::findMSB; + using glm::fliplr; + using glm::flipud; + using glm::floatBitsToInt; + using glm::floatBitsToUint; + using glm::floor; + using glm::floor_log2; + using glm::fma; + using glm::fmax; + using glm::fmin; + using glm::fmod; + using glm::four_over_pi; + using glm::fract; + using glm::frexp; + using glm::frustum; + using glm::frustumLH; + using glm::frustumLH_NO; + using glm::frustumLH_ZO; + using glm::frustumNO; + using glm::frustumRH; + using glm::frustumRH_NO; + using glm::frustumRH_ZO; + using glm::frustumZO; + using glm::gauss; + using glm::golden_ratio; + using glm::greaterThan; + using glm::greaterThanEqual; + using glm::half_pi; + using glm::hermite; + using glm::highestBitValue; + using glm::hsvColor; + using glm::identity; + using glm::imulExtended; + using glm::infinitePerspective; + using glm::infinitePerspectiveLH; + using glm::infinitePerspectiveRH; + using glm::intBitsToFloat; + using glm::intermediate; + using glm::interpolate; + using glm::intersectLineSphere; + using glm::intersectLineTriangle; + using glm::intersectRayPlane; + using glm::intersectRaySphere; + using glm::intersectRayTriangle; + using glm::inverse; + using glm::inversesqrt; + using glm::iround; + using glm::isCompNull; + using glm::isIdentity; + using glm::isNormalized; + using glm::isNull; + using glm::isOrthogonal; + using glm::isdenormal; + using glm::isfinite; + using glm::isinf; + using glm::isnan; + using glm::l1Norm; + using glm::l2Norm; + using glm::lMaxNorm; + using glm::ldexp; + using glm::leftHanded; + using glm::length; + using glm::length2; + using glm::lerp; + using glm::lessThan; + using glm::lessThanEqual; + using glm::linearGradient; + using glm::linearInterpolation; + using glm::ln_ln_two; + using glm::ln_ten; + using glm::ln_two; + using glm::log; + using glm::log2; + using glm::lookAt; + using glm::lookAtLH; + using glm::lookAtRH; + using glm::lowestBitValue; + using glm::luminosity; + using glm::lxNorm; + using glm::make_mat2; + using glm::make_mat2x2; + using glm::make_mat2x3; + using glm::make_mat2x4; + using glm::make_mat3; + using glm::make_mat3x2; + using glm::make_mat3x3; + using glm::make_mat3x4; + using glm::make_mat4; + using glm::make_mat4x2; + using glm::make_mat4x3; + using glm::make_mat4x4; + using glm::make_quat; + using glm::make_vec1; + using glm::make_vec2; + using glm::make_vec3; + using glm::make_vec4; + using glm::mask; + using glm::mat2x4_cast; + using glm::mat3_cast; + using glm::mat3x4_cast; + using glm::mat4_cast; + using glm::matrixCompMult; + using glm::matrixCross3; + using glm::matrixCross4; + using glm::max; + using glm::min; + using glm::mirrorClamp; + using glm::mirrorRepeat; + using glm::mix; + using glm::mixedProduct; 
+ using glm::mod; + using glm::modf; + using glm::nlz; + using glm::normalize; + using glm::normalizeDot; + using glm::notEqual; + using glm::not_; + using glm::YCoCg2rgb; + using glm::YCoCgR2rgb; + using glm::one; + using glm::one_over_pi; + using glm::one_over_root_two; + using glm::one_over_two_pi; + using glm::openBounded; + using glm::orientate2; + using glm::orientate3; + using glm::orientate4; + using glm::orientation; + using glm::orientedAngle; + using glm::ortho; + using glm::orthoLH; + using glm::orthoLH_NO; + using glm::orthoLH_ZO; + using glm::orthoNO; + using glm::orthoRH; + using glm::orthoRH_NO; + using glm::orthoRH_ZO; + using glm::orthoZO; + using glm::orthonormalize; + using glm::outerProduct; + using glm::packDouble2x32; + using glm::packHalf2x16; + using glm::packSnorm2x16; + using glm::packSnorm4x8; + using glm::packUnorm2x16; + using glm::packUnorm4x8; + using glm::perp; + using glm::perspective; + using glm::perspectiveFov; + using glm::perspectiveFovLH; + using glm::perspectiveFovLH_NO; + using glm::perspectiveFovLH_ZO; + using glm::perspectiveFovNO; + using glm::perspectiveFovRH; + using glm::perspectiveFovRH_NO; + using glm::perspectiveFovRH_ZO; + using glm::perspectiveFovZO; + using glm::perspectiveLH; + using glm::perspectiveLH_NO; + using glm::perspectiveLH_ZO; + using glm::perspectiveNO; + using glm::perspectiveRH; + using glm::perspectiveRH_NO; + using glm::perspectiveRH_ZO; + using glm::perspectiveZO; + using glm::pi; + using glm::pickMatrix; + using glm::pitch; + using glm::polar; + using glm::pow; + using glm::pow2; + using glm::pow3; + using glm::pow4; + using glm::powerOfTwoAbove; + using glm::powerOfTwoBelow; + using glm::powerOfTwoNearest; + using glm::proj; + using glm::proj2D; + using glm::proj3D; + using glm::project; + using glm::projectNO; + using glm::projectZO; + using glm::qr_decompose; + using glm::quadraticEaseIn; + using glm::quadraticEaseInOut; + using glm::quadraticEaseOut; + using glm::quarter_pi; + using glm::quarticEaseIn; + using glm::quarticEaseInOut; + using glm::quarticEaseOut; + using glm::quatLookAt; + using glm::quatLookAtLH; + using glm::quatLookAtRH; + using glm::quat_cast; + using glm::quat_identity; + using glm::quinticEaseIn; + using glm::quinticEaseInOut; + using glm::quinticEaseOut; + using glm::radialGradient; + using glm::radians; + using glm::recompose; + using glm::reflect; + using glm::refract; + using glm::repeat; + using glm::rgb2YCoCg; + using glm::rgb2YCoCgR; + using glm::rgbColor; + using glm::rightHanded; + using glm::roll; + using glm::root_five; + using glm::root_half_pi; + using glm::root_ln_four; + using glm::root_pi; + using glm::root_three; + using glm::root_two; + using glm::root_two_pi; + using glm::rotate; + using glm::rotateNormalizedAxis; + using glm::rotateX; + using glm::rotateY; + using glm::rotateZ; + using glm::rotation; + using glm::round; + using glm::roundEven; + using glm::rowMajor2; + using glm::rowMajor3; + using glm::rowMajor4; + using glm::rq_decompose; + using glm::saturation; + using glm::scale; + using glm::scaleBias; + using glm::shearX2D; + using glm::shearX3D; + using glm::shearY2D; + using glm::shearY3D; + using glm::shearZ3D; + using glm::shortMix; + using glm::sign; + using glm::sin; + using glm::sineEaseIn; + using glm::sineEaseInOut; + using glm::sineEaseOut; + using glm::sinh; + using glm::slerp; + using glm::smoothstep; + using glm::sortEigenvalues; + using glm::sqrt; + using glm::squad; + using glm::step; + using glm::tan; + using glm::tanh; + using glm::third; + using 
glm::three_over_two_pi; + using glm::translate; + using glm::transpose; + using glm::triangleNormal; + using glm::trunc; + using glm::tweakedInfinitePerspective; + using glm::two_over_pi; + using glm::two_over_root_pi; + using glm::two_pi; + using glm::two_thirds; + using glm::uaddCarry; + using glm::uintBitsToFloat; + using glm::umulExtended; + using glm::unProject; + using glm::unProjectNO; + using glm::unProjectZO; + using glm::unpackDouble2x32; + using glm::unpackHalf2x16; + using glm::unpackSnorm2x16; + using glm::unpackSnorm4x8; + using glm::unpackUnorm2x16; + using glm::unpackUnorm4x8; + using glm::uround; + using glm::usubBorrow; + using glm::value_ptr; + using glm::wrapAngle; + using glm::wxyz; + using glm::yaw; + using glm::yawPitchRoll; + using glm::zero;
+	}
+# endif
+}
+
+#if defined(_MSC_VER) // Workaround
+// Partial template specialization doesn't need to be exported explicitly, but this may not work otherwise on MSVC.
+export namespace std {
+	using std::hash; // See GLM_GTX_hash
+}
+#endif
diff --git a/thirdparty/manifold/thirdparty/glm/glm/glm.hpp b/thirdparty/manifold/thirdparty/glm/glm/glm.hpp
new file mode 100644
index 000000000000..8b375459a784
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/glm.hpp
@@ -0,0 +1,137 @@
+/// @ref core
+/// @file glm/glm.hpp
+///
+/// @mainpage OpenGL Mathematics (GLM)
+/// - Website: glm.g-truc.net
+/// - GLM API documentation
+/// - GLM Manual
+///
+/// @defgroup core Core features
+///
+/// @brief Features that implement in C++ the GLSL specification as closely as possible.
+///
+/// The GLM core consists of C++ types that mirror GLSL types and
+/// C++ functions that mirror the GLSL functions.
+///
+/// The best documentation for GLM Core is the current GLSL specification,
+/// version 4.2 (pdf file).
+///
+/// GLM core functionalities require <glm/glm.hpp> to be included to be used.
+///
+///
+/// @defgroup core_vector Vector types
+///
+/// Vector types of two to four components with an exhaustive set of operators.
+///
+/// @ingroup core
+///
+///
+/// @defgroup core_vector_precision Vector types with precision qualifiers
+///
+/// @brief Vector types with precision qualifiers, which may result in various precision in terms of ULPs.
+///
+/// GLSL allows defining qualifiers for particular variables.
+/// With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility.
+/// With OpenGL ES's GLSL, these qualifiers do have an effect.
+///
+/// C++ has no language equivalent to precision qualifiers, so GLM provides the next-best thing:
+/// a number of typedefs that use a particular qualifier.
+///
+/// None of these types make any guarantees about the actual qualifier used.
+///
+/// @ingroup core
+///
+///
+/// @defgroup core_matrix Matrix types
+///
+/// Matrix types with C columns and R rows, where C and R are values between 2 and 4 inclusive.
+/// These types have exhaustive sets of operators.
+///
+/// @ingroup core
+///
+///
+/// @defgroup core_matrix_precision Matrix types with precision qualifiers
+///
+/// @brief Matrix types with precision qualifiers, which may result in various precision in terms of ULPs.
+///
+/// GLSL allows defining qualifiers for particular variables.
+/// With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility.
+/// With OpenGL ES's GLSL, these qualifiers do have an effect.
+///
+/// C++ has no language equivalent to precision qualifiers, so GLM provides the next-best thing:
+/// a number of typedefs that use a particular qualifier.
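To make the qualifier typedefs described above concrete, here is a minimal usage sketch. It is illustrative only, not part of the patch, and it assumes a build where the vendored GLM tree above is reachable on the include path as <glm/glm.hpp>. Each qualified alias exposes the same interface as the unqualified type, so switching qualifiers does not change call sites:

    // Illustrative sketch only; assumes <glm/glm.hpp> resolves to the vendored GLM above.
    #include <glm/glm.hpp>

    int main() {
        // The same vec3 interface under different precision qualifiers; on desktop
        // OpenGL-style builds these typically alias the same underlying type.
        glm::lowp_vec3 a(1.0f, 0.0f, 0.0f);
        glm::mediump_vec3 b(0.0f, 1.0f, 0.0f);
        glm::highp_vec3 c = glm::cross(glm::highp_vec3(a), glm::highp_vec3(b));
        // Core functions mirror GLSL: dot, cross, length, normalize, ...
        return glm::length(c) > 0.0f ? 0 : 1;
    }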
+///
+/// None of these types make any guarantees about the actual qualifier used.
+///
+/// @ingroup core
+///
+///
+/// @defgroup ext Stable extensions
+///
+/// @brief Additional features not specified by GLSL specification.
+///
+/// EXT extensions are fully tested and documented.
+///
+/// Even if it's highly unrecommended, it's possible to include all the extensions at once by
+/// including <glm/ext.hpp>. Otherwise, each extension needs to be included from a specific file.
+///
+///
+/// @defgroup gtc Recommended extensions
+///
+/// @brief Additional features not specified by GLSL specification.
+///
+/// GTC extensions aim to be stable, with tests and documentation.
+///
+/// Even if it's highly unrecommended, it's possible to include all the extensions at once by
+/// including <glm/ext.hpp>. Otherwise, each extension needs to be included from a specific file.
+///
+///
+/// @defgroup gtx Experimental extensions
+///
+/// @brief Experimental features not specified by GLSL specification.
+///
+/// Experimental extensions are useful functions and types, but the development of
+/// their API and functionality is not necessarily stable. They can change
+/// substantially between versions. Backwards compatibility is not much of an issue
+/// for them.
+///
+/// Even if it's highly unrecommended, it's possible to include all the extensions
+/// at once by including <glm/ext.hpp>. Otherwise, each extension needs to be
+/// included from a specific file.
+///
+
+#include "detail/_fixes.hpp"
+
+#include "detail/setup.hpp"
+
+#pragma once
+
+#include <cmath>
+#include <climits>
+#include <cfloat>
+#include <limits>
+#include <cassert>
+#include "fwd.hpp"
+
+#include "vec2.hpp"
+#include "vec3.hpp"
+#include "vec4.hpp"
+#include "mat2x2.hpp"
+#include "mat2x3.hpp"
+#include "mat2x4.hpp"
+#include "mat3x2.hpp"
+#include "mat3x3.hpp"
+#include "mat3x4.hpp"
+#include "mat4x2.hpp"
+#include "mat4x3.hpp"
+#include "mat4x4.hpp"
+
+#include "trigonometric.hpp"
+#include "exponential.hpp"
+#include "common.hpp"
+#include "packing.hpp"
+#include "geometric.hpp"
+#include "matrix.hpp"
+#include "vector_relational.hpp"
+#include "integer.hpp"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/bitfield.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/bitfield.hpp
new file mode 100644
index 000000000000..084fbe75ff14
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/bitfield.hpp
@@ -0,0 +1,266 @@
+/// @ref gtc_bitfield
+/// @file glm/gtc/bitfield.hpp
+///
+/// @see core (dependence)
+/// @see gtc_bitfield (dependence)
+///
+/// @defgroup gtc_bitfield GLM_GTC_bitfield
+/// @ingroup gtc
+///
+/// Include <glm/gtc/bitfield.hpp> to use the features of this extension.
+///
+/// Allows performing bit operations on integer values.
+
+#include "../detail/setup.hpp"
+
+#pragma once
+
+// Dependencies
+#include "../ext/scalar_int_sized.hpp"
+#include "../ext/scalar_uint_sized.hpp"
+#include "../detail/qualifier.hpp"
+#include "../detail/_vectorize.hpp"
+#include "type_precision.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_bitfield extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtc_bitfield
+	/// @{
+
+	/// Build a mask of 'count' bits
+	///
+	/// @see gtc_bitfield
+	template<typename genIUType>
+	GLM_FUNC_DECL genIUType mask(genIUType Bits);
+
+	/// Build a mask of 'count' bits
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed and unsigned integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see gtc_bitfield
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> mask(vec<L, T, Q> const& v);
+
+	/// Rotate all bits to the right. All the bits dropped in the right side are inserted back on the left side.
+	///
+	/// @see gtc_bitfield
+	template<typename genIUType>
+	GLM_FUNC_DECL genIUType bitfieldRotateRight(genIUType In, int Shift);
+
+	/// Rotate all bits to the right. All the bits dropped in the right side are inserted back on the left side.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed and unsigned integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see gtc_bitfield
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> bitfieldRotateRight(vec<L, T, Q> const& In, int Shift);
+
+	/// Rotate all bits to the left. All the bits dropped in the left side are inserted back on the right side.
+	///
+	/// @see gtc_bitfield
+	template<typename genIUType>
+	GLM_FUNC_DECL genIUType bitfieldRotateLeft(genIUType In, int Shift);
+
+	/// Rotate all bits to the left. All the bits dropped in the left side are inserted back on the right side.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed and unsigned integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see gtc_bitfield
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> bitfieldRotateLeft(vec<L, T, Q> const& In, int Shift);
+
+	/// Set to 1 a range of bits.
+	///
+	/// @see gtc_bitfield
+	template<typename genIUType>
+	GLM_FUNC_DECL genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount);
+
+	/// Set to 1 a range of bits.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed and unsigned integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see gtc_bitfield
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> bitfieldFillOne(vec<L, T, Q> const& Value, int FirstBit, int BitCount);
+
+	/// Set to 0 a range of bits.
+	///
+	/// @see gtc_bitfield
+	template<typename genIUType>
+	GLM_FUNC_DECL genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount);
+
+	/// Set to 0 a range of bits.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Signed and unsigned integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see gtc_bitfield
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> bitfieldFillZero(vec<L, T, Q> const& Value, int FirstBit, int BitCount);
+
+	/// Interleaves the bits of x and y.
+	/// The first bit is the first bit of x followed by the first bit of y.
+	/// The other bits are interleaved following the previous sequence.
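A small sketch of the documented semantics of the scalar forms just declared (illustrative values, not part of the patch; it assumes the vendored header is reachable as <glm/gtc/bitfield.hpp>): mask(n) builds a mask of the n lowest bits, the rotate functions reinsert dropped bits on the opposite side, and the fill functions set or clear BitCount bits starting at FirstBit.

    // Illustrative sketch based on the doxygen above; expected values derived by hand.
    #include <cassert>
    #include <glm/gtc/bitfield.hpp>

    int main() {
        assert(glm::mask(4) == 0xF);                                   // low four bits set
        assert(glm::bitfieldRotateRight(glm::uint8(0x01), 1) == 0x80); // dropped bit wraps to the top
        assert(glm::bitfieldRotateLeft(glm::uint8(0x80), 1) == 0x01);  // dropped bit wraps to the bottom
        assert(glm::bitfieldFillOne(0x00u, 4, 2) == 0x30u);            // set bits 4..5
        assert(glm::bitfieldFillZero(0xFFu, 0, 4) == 0xF0u);           // clear bits 0..3
        return 0;
    }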
+ /// + /// @see gtc_bitfield + GLM_FUNC_DECL int16 bitfieldInterleave(int8 x, int8 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint16 bitfieldInterleave(uint8 x, uint8 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of v.x followed by the first bit of v.y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint16 bitfieldInterleave(u8vec2 const& v); + + /// Deinterleaves the bits of x. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL glm::u8vec2 bitfieldDeinterleave(glm::uint16 x); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int32 bitfieldInterleave(int16 x, int16 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint32 bitfieldInterleave(uint16 x, uint16 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of v.x followed by the first bit of v.y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint32 bitfieldInterleave(u16vec2 const& v); + + /// Deinterleaves the bits of x. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL glm::u16vec2 bitfieldDeinterleave(glm::uint32 x); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of v.x followed by the first bit of v.y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint64 bitfieldInterleave(u32vec2 const& v); + + /// Deinterleaves the bits of x. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL glm::u32vec2 bitfieldDeinterleave(glm::uint64 x); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. 
+ /// + /// @see gtc_bitfield + GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y, int32 z); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z); + + /// Interleaves the bits of x, y, z and w. + /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w); + + /// Interleaves the bits of x, y, z and w. + /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w); + + /// Interleaves the bits of x, y, z and w. + /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w); + + /// Interleaves the bits of x, y, z and w. + /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. + /// The other bits are interleaved following the previous sequence. 
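+	///
+	/// A minimal usage sketch (not from the upstream docs); a 3D Morton key for
+	/// spatial hashing with the three-way overload, assuming 16-bit cell
+	/// coordinates ix, iy, iz:
+	/// @code
+	/// glm::uint64 key = glm::bitfieldInterleave(glm::uint16(ix), glm::uint16(iy), glm::uint16(iz));
+	/// @endcode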
+	///
+	/// @see gtc_bitfield
+	GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w);
+
+	/// @}
+} //namespace glm
+
+#include "bitfield.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/bitfield.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/bitfield.inl
new file mode 100644
index 000000000000..06cf1889cd40
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/bitfield.inl
@@ -0,0 +1,626 @@
+/// @ref gtc_bitfield
+
+#include "../simd/integer.h"
+
+namespace glm{
+namespace detail
+{
+	template<typename PARAM, typename RET>
+	GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y);
+
+	template<typename PARAM, typename RET>
+	GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z);
+
+	template<typename PARAM, typename RET>
+	GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z, PARAM w);
+
+	template<>
+	GLM_FUNC_QUALIFIER glm::uint16 bitfieldInterleave(glm::uint8 x, glm::uint8 y)
+	{
+		glm::uint16 REG1(x);
+		glm::uint16 REG2(y);
+
+		REG1 = ((REG1 << 4) | REG1) & static_cast<glm::uint16>(0x0F0F);
+		REG2 = ((REG2 << 4) | REG2) & static_cast<glm::uint16>(0x0F0F);
+
+		REG1 = ((REG1 << 2) | REG1) & static_cast<glm::uint16>(0x3333);
+		REG2 = ((REG2 << 2) | REG2) & static_cast<glm::uint16>(0x3333);
+
+		REG1 = ((REG1 << 1) | REG1) & static_cast<glm::uint16>(0x5555);
+		REG2 = ((REG2 << 1) | REG2) & static_cast<glm::uint16>(0x5555);
+
+		return REG1 | static_cast<glm::uint16>(REG2 << 1);
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint16 x, glm::uint16 y)
+	{
+		glm::uint32 REG1(x);
+		glm::uint32 REG2(y);
+
+		REG1 = ((REG1 <<  8) | REG1) & static_cast<glm::uint32>(0x00FF00FF);
+		REG2 = ((REG2 <<  8) | REG2) & static_cast<glm::uint32>(0x00FF00FF);
+
+		REG1 = ((REG1 <<  4) | REG1) & static_cast<glm::uint32>(0x0F0F0F0F);
+		REG2 = ((REG2 <<  4) | REG2) & static_cast<glm::uint32>(0x0F0F0F0F);
+
+		REG1 = ((REG1 <<  2) | REG1) & static_cast<glm::uint32>(0x33333333);
+		REG2 = ((REG2 <<  2) | REG2) & static_cast<glm::uint32>(0x33333333);
+
+		REG1 = ((REG1 <<  1) | REG1) & static_cast<glm::uint32>(0x55555555);
+		REG2 = ((REG2 <<  1) | REG2) & static_cast<glm::uint32>(0x55555555);
+
+		return REG1 | (REG2 << 1);
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y)
+	{
+		glm::uint64 REG1(x);
+		glm::uint64 REG2(y);
+
+		REG1 = ((REG1 << 16) | REG1) & static_cast<glm::uint64>(0x0000FFFF0000FFFFull);
+		REG2 = ((REG2 << 16) | REG2) & static_cast<glm::uint64>(0x0000FFFF0000FFFFull);
+
+		REG1 = ((REG1 <<  8) | REG1) & static_cast<glm::uint64>(0x00FF00FF00FF00FFull);
+		REG2 = ((REG2 <<  8) | REG2) & static_cast<glm::uint64>(0x00FF00FF00FF00FFull);
+
+		REG1 = ((REG1 <<  4) | REG1) & static_cast<glm::uint64>(0x0F0F0F0F0F0F0F0Full);
+		REG2 = ((REG2 <<  4) | REG2) & static_cast<glm::uint64>(0x0F0F0F0F0F0F0F0Full);
+
+		REG1 = ((REG1 <<  2) | REG1) & static_cast<glm::uint64>(0x3333333333333333ull);
+		REG2 = ((REG2 <<  2) | REG2) & static_cast<glm::uint64>(0x3333333333333333ull);
+
+		REG1 = ((REG1 <<  1) | REG1) & static_cast<glm::uint64>(0x5555555555555555ull);
+		REG2 = ((REG2 <<  1) | REG2) & static_cast<glm::uint64>(0x5555555555555555ull);
+
+		return REG1 | (REG2 << 1);
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z)
+	{
+		glm::uint32 REG1(x);
+		glm::uint32 REG2(y);
+		glm::uint32 REG3(z);
+
+		REG1 = ((REG1 << 16) | REG1) & static_cast<glm::uint32>(0xFF0000FFu);
+		REG2 = ((REG2 << 16) | REG2) & static_cast<glm::uint32>(0xFF0000FFu);
+		REG3 = ((REG3 << 16) | REG3) & static_cast<glm::uint32>(0xFF0000FFu);
+
+		REG1 = ((REG1 <<  8) | REG1) & static_cast<glm::uint32>(0x0F00F00Fu);
+		REG2 = ((REG2 <<  8) | REG2) & static_cast<glm::uint32>(0x0F00F00Fu);
+		REG3 = ((REG3 <<  8) | REG3) & static_cast<glm::uint32>(0x0F00F00Fu);
+
+		REG1 = ((REG1 <<  4) | REG1) & static_cast<glm::uint32>(0xC30C30C3u);
+		REG2 = ((REG2 <<  4) | REG2) & static_cast<glm::uint32>(0xC30C30C3u);
+		REG3 = ((REG3 <<  4) | REG3) & static_cast<glm::uint32>(0xC30C30C3u);
+
+		REG1 = ((REG1 <<  2) | REG1) & static_cast<glm::uint32>(0x49249249u);
+		REG2 = ((REG2 <<  2) | REG2) & static_cast<glm::uint32>(0x49249249u);
+		REG3 = ((REG3 <<  2) | REG3) & static_cast<glm::uint32>(0x49249249u);
+
+		return REG1 | (REG2 << 1) | (REG3 << 2);
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z)
+	{
+		glm::uint64 REG1(x);
+		glm::uint64 REG2(y);
+		glm::uint64 REG3(z);
+
+		REG1 = ((REG1 << 32) | REG1) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+		REG2 = ((REG2 << 32) | REG2) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+		REG3 = ((REG3 << 32) | REG3) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+
+		REG1 = ((REG1 << 16) | REG1) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+		REG2 = ((REG2 << 16) | REG2) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+		REG3 = ((REG3 << 16) | REG3) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+
+		REG1 = ((REG1 <<  8) | REG1) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+		REG2 = ((REG2 <<  8) | REG2) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+		REG3 = ((REG3 <<  8) | REG3) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+
+		REG1 = ((REG1 <<  4) | REG1) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+		REG2 = ((REG2 <<  4) | REG2) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+		REG3 = ((REG3 <<  4) | REG3) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+
+		REG1 = ((REG1 <<  2) | REG1) & static_cast<glm::uint64>(0x9249249249249249ull);
+		REG2 = ((REG2 <<  2) | REG2) & static_cast<glm::uint64>(0x9249249249249249ull);
+		REG3 = ((REG3 <<  2) | REG3) & static_cast<glm::uint64>(0x9249249249249249ull);
+
+		return REG1 | (REG2 << 1) | (REG3 << 2);
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y, glm::uint32 z)
+	{
+		glm::uint64 REG1(x);
+		glm::uint64 REG2(y);
+		glm::uint64 REG3(z);
+
+		REG1 = ((REG1 << 32) | REG1) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+		REG2 = ((REG2 << 32) | REG2) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+		REG3 = ((REG3 << 32) | REG3) & static_cast<glm::uint64>(0xFFFF00000000FFFFull);
+
+		REG1 = ((REG1 << 16) | REG1) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+		REG2 = ((REG2 << 16) | REG2) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+		REG3 = ((REG3 << 16) | REG3) & static_cast<glm::uint64>(0x00FF0000FF0000FFull);
+
+		REG1 = ((REG1 <<  8) | REG1) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+		REG2 = ((REG2 <<  8) | REG2) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+		REG3 = ((REG3 <<  8) | REG3) & static_cast<glm::uint64>(0xF00F00F00F00F00Full);
+
+		REG1 = ((REG1 <<  4) | REG1) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+		REG2 = ((REG2 <<  4) | REG2) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+		REG3 = ((REG3 <<  4) | REG3) & static_cast<glm::uint64>(0x30C30C30C30C30C3ull);
+
+		REG1 = ((REG1 <<  2) | REG1) & static_cast<glm::uint64>(0x9249249249249249ull);
+		REG2 = ((REG2 <<  2) | REG2) & static_cast<glm::uint64>(0x9249249249249249ull);
+		REG3 = ((REG3 <<  2) | REG3) & static_cast<glm::uint64>(0x9249249249249249ull);
+
+		return REG1 | (REG2 << 1) | (REG3 << 2);
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z, glm::uint8 w)
+	{
+		glm::uint32 REG1(x);
+		glm::uint32 REG2(y);
+		glm::uint32 REG3(z);
+		glm::uint32 REG4(w);
+
+		REG1 = ((REG1 << 12) | REG1) & static_cast<glm::uint32>(0x000F000Fu);
+		REG2 = ((REG2 << 12) | REG2) & static_cast<glm::uint32>(0x000F000Fu);
+		REG3 = ((REG3 << 12) | REG3) & static_cast<glm::uint32>(0x000F000Fu);
+		REG4 = ((REG4 << 12) | REG4) & static_cast<glm::uint32>(0x000F000Fu);
+
+		REG1 = ((REG1 <<  6) | REG1) & static_cast<glm::uint32>(0x03030303u);
+		REG2 = ((REG2 <<  6) | REG2) & static_cast<glm::uint32>(0x03030303u);
+		REG3 = ((REG3 <<  6) | REG3) & static_cast<glm::uint32>(0x03030303u);
+		REG4 = ((REG4 <<  6) | REG4) & static_cast<glm::uint32>(0x03030303u);
+
+		REG1 = ((REG1 <<  3) | REG1) & static_cast<glm::uint32>(0x11111111u);
+		REG2 = ((REG2 <<  3) | REG2) & static_cast<glm::uint32>(0x11111111u);
+		REG3 = ((REG3 <<  3) | REG3) & static_cast<glm::uint32>(0x11111111u);
+		REG4 = ((REG4 <<  3) | REG4) & static_cast<glm::uint32>(0x11111111u);
+
+		return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3);
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z, glm::uint16 w)
+	{
+		glm::uint64 REG1(x);
+		glm::uint64 REG2(y);
+		glm::uint64 REG3(z);
+		glm::uint64 REG4(w);
+
+		REG1 = ((REG1 << 24) | REG1) & static_cast<glm::uint64>(0x000000FF000000FFull);
+		REG2 = ((REG2 << 24) | REG2) & static_cast<glm::uint64>(0x000000FF000000FFull);
+		REG3 = ((REG3 << 24) | REG3) & static_cast<glm::uint64>(0x000000FF000000FFull);
+		REG4 = ((REG4 << 24) | REG4) & static_cast<glm::uint64>(0x000000FF000000FFull);
+
+		REG1 = ((REG1 << 12) | REG1) & static_cast<glm::uint64>(0x000F000F000F000Full);
+		REG2 = ((REG2 << 12) | REG2) & static_cast<glm::uint64>(0x000F000F000F000Full);
+		REG3 = ((REG3 << 12) | REG3) & static_cast<glm::uint64>(0x000F000F000F000Full);
+		REG4 = ((REG4 << 12) | REG4) & static_cast<glm::uint64>(0x000F000F000F000Full);
+
+		REG1 = ((REG1 <<  6) | REG1) & static_cast<glm::uint64>(0x0303030303030303ull);
+		REG2 = ((REG2 <<  6) | REG2) & static_cast<glm::uint64>(0x0303030303030303ull);
+		REG3 = ((REG3 <<  6) | REG3) & static_cast<glm::uint64>(0x0303030303030303ull);
+		REG4 = ((REG4 <<  6) | REG4) & static_cast<glm::uint64>(0x0303030303030303ull);
+
+		REG1 = ((REG1 <<  3) | REG1) & static_cast<glm::uint64>(0x1111111111111111ull);
+		REG2 = ((REG2 <<  3) | REG2) & static_cast<glm::uint64>(0x1111111111111111ull);
+		REG3 = ((REG3 <<  3) | REG3) & static_cast<glm::uint64>(0x1111111111111111ull);
+		REG4 = ((REG4 <<  3) | REG4) & static_cast<glm::uint64>(0x1111111111111111ull);
+
+		return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3);
+	}
+}//namespace detail
+
+	template<typename genIUType>
+	GLM_FUNC_QUALIFIER genIUType mask(genIUType Bits)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'mask' accepts only integer values");
+
+		return Bits >= sizeof(genIUType) * 8 ? ~static_cast<genIUType>(0) : (static_cast<genIUType>(1) << Bits) - static_cast<genIUType>(1);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> mask(vec<L, T, Q> const& v)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'mask' accepts only integer values");
+
+		return detail::functor1<vec, L, T, T, Q>::call(mask, v);
+	}
+
+	template<typename genIType>
+	GLM_FUNC_QUALIFIER genIType bitfieldRotateRight(genIType In, int Shift)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<genIType>::is_integer, "'bitfieldRotateRight' accepts only integer values");
+
+		int const BitSize = static_cast<int>(sizeof(genIType) * 8);
+		return (In << static_cast<genIType>(Shift)) | (In >> static_cast<genIType>(BitSize - Shift));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldRotateRight(vec<L, T, Q> const& In, int Shift)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldRotateRight' accepts only integer values");
+
+		int const BitSize = static_cast<int>(sizeof(T) * 8);
+		return (In << static_cast<T>(Shift)) | (In >> static_cast<T>(BitSize - Shift));
+	}
+
+	template<typename genIType>
+	GLM_FUNC_QUALIFIER genIType bitfieldRotateLeft(genIType In, int Shift)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<genIType>::is_integer, "'bitfieldRotateLeft' accepts only integer values");
+
+		int const BitSize = static_cast<int>(sizeof(genIType) * 8);
+		return (In >> static_cast<genIType>(Shift)) | (In << static_cast<genIType>(BitSize - Shift));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldRotateLeft(vec<L, T, Q> const& In, int Shift)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldRotateLeft' accepts only integer values");
+
+		int const BitSize = static_cast<int>(sizeof(T) * 8);
+		return (In >> static_cast<T>(Shift)) | (In << static_cast<T>(BitSize - Shift));
+	}
+
+	template<typename genIUType>
+	GLM_FUNC_QUALIFIER genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount)
+	{
+		return Value | static_cast<genIUType>(mask(BitCount) << FirstBit);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldFillOne(vec<L, T, Q> const& Value, int FirstBit, int BitCount)
+	{
+		return Value | static_cast<T>(mask(BitCount) << FirstBit);
+	}
+
+	template<typename genIUType>
+	GLM_FUNC_QUALIFIER genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount)
+	{
+		return Value & static_cast<genIUType>(~(mask(BitCount) << FirstBit));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldFillZero(vec<L, T, Q> const& Value, int FirstBit, int BitCount)
+	{
+		return Value & static_cast<T>(~(mask(BitCount) << FirstBit));
+	}
+
+	GLM_FUNC_QUALIFIER int16 bitfieldInterleave(int8 x, int8 y)
+	{
+		union sign8
+		{
+			int8 i;
+			uint8 u;
+		} sign_x, sign_y;
+
+		union sign16
+		{
+			int16 i;
+			uint16 u;
+		} result;
+
+		sign_x.i = x;
+		sign_y.i = y;
+		result.u = bitfieldInterleave(sign_x.u, sign_y.u);
+
+		return result.i;
+	}
+
+	GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(uint8 x, uint8 y)
+	{
+		return detail::bitfieldInterleave<uint8, uint16>(x, y);
+	}
+
+	GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(u8vec2 const& v)
+	{
+		return detail::bitfieldInterleave<uint8, uint16>(v.x, v.y);
+	}
+
+	GLM_FUNC_QUALIFIER u8vec2 bitfieldDeinterleave(glm::uint16 x)
+	{
+		uint16 REG1(x);
+		uint16 REG2(x >>= 1);
+
+		REG1 = REG1 & static_cast<uint16>(0x5555);
+		REG2 = REG2 & static_cast<uint16>(0x5555);
+
+		REG1 = ((REG1 >> 1) | REG1) & static_cast<uint16>(0x3333);
+		REG2 = ((REG2 >> 1) | REG2) & static_cast<uint16>(0x3333);
+
+		REG1 = ((REG1 >> 2) | REG1) & static_cast<uint16>(0x0F0F);
+		REG2 = ((REG2 >> 2) | REG2) & static_cast<uint16>(0x0F0F);
+
+		REG1 = ((REG1 >> 4) | REG1) & static_cast<uint16>(0x00FF);
+		REG2 = ((REG2 >> 4) | REG2) & static_cast<uint16>(0x00FF);
+
+		REG1 = ((REG1 >> 8) | REG1) & static_cast<uint16>(0xFFFF);
+		REG2 = ((REG2 >> 8) | REG2) & static_cast<uint16>(0xFFFF);
+
+		return glm::u8vec2(REG1, REG2);
+	}
+
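+#if 0
+	// Usage sketch (editorial, never compiled): a hypothetical round-trip
+	// check for the interleave/deinterleave pair above; assumes <cassert>.
+	inline void bitfieldInterleaveRoundTrip()
+	{
+		// x fills the even bits and y the odd bits, so 0xFF / 0x00 -> 0x5555.
+		assert(bitfieldInterleave(uint8(0xFF), uint8(0x00)) == uint16(0x5555));
+		assert(bitfieldDeinterleave(uint16(0x5555)) == u8vec2(0xFF, 0x00));
+	}
+#endif
+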
+	GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int16 x, int16 y)
+	{
+		union sign16
+		{
+			int16 i;
+			uint16 u;
+		} sign_x, sign_y;
+
+		union sign32
+		{
+			int32 i;
+			uint32 u;
+		} result;
+
+		sign_x.i = x;
+		sign_y.i = y;
+		result.u = bitfieldInterleave(sign_x.u, sign_y.u);
+
+		return result.i;
+	}
+
+	GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint16 x, uint16 y)
+	{
+		return detail::bitfieldInterleave<uint16, uint32>(x, y);
+	}
+
+	GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(u16vec2 const& v)
+	{
+		return detail::bitfieldInterleave<uint16, uint32>(v.x, v.y);
+	}
+
+	GLM_FUNC_QUALIFIER glm::u16vec2 bitfieldDeinterleave(glm::uint32 x)
+	{
+		glm::uint32 REG1(x);
+		glm::uint32 REG2(x >>= 1);
+
+		REG1 = REG1 & static_cast<glm::uint32>(0x55555555);
+		REG2 = REG2 & static_cast<glm::uint32>(0x55555555);
+
+		REG1 = ((REG1 >> 1) | REG1) & static_cast<glm::uint32>(0x33333333);
+		REG2 = ((REG2 >> 1) | REG2) & static_cast<glm::uint32>(0x33333333);
+
+		REG1 = ((REG1 >> 2) | REG1) & static_cast<glm::uint32>(0x0F0F0F0F);
+		REG2 = ((REG2 >> 2) | REG2) & static_cast<glm::uint32>(0x0F0F0F0F);
+
+		REG1 = ((REG1 >> 4) | REG1) & static_cast<glm::uint32>(0x00FF00FF);
+		REG2 = ((REG2 >> 4) | REG2) & static_cast<glm::uint32>(0x00FF00FF);
+
+		REG1 = ((REG1 >> 8) | REG1) & static_cast<glm::uint32>(0x0000FFFF);
+		REG2 = ((REG2 >> 8) | REG2) & static_cast<glm::uint32>(0x0000FFFF);
+
+		return glm::u16vec2(REG1, REG2);
+	}
+
+	GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y)
+	{
+		union sign32
+		{
+			int32 i;
+			uint32 u;
+		} sign_x, sign_y;
+
+		union sign64
+		{
+			int64 i;
+			uint64 u;
+		} result;
+
+		sign_x.i = x;
+		sign_y.i = y;
+		result.u = bitfieldInterleave(sign_x.u, sign_y.u);
+
+		return result.i;
+	}
+
+	GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y)
+	{
+		return detail::bitfieldInterleave<uint32, uint64>(x, y);
+	}
+
+	GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(u32vec2 const& v)
+	{
+		return detail::bitfieldInterleave<uint32, uint64>(v.x, v.y);
+	}
+
+	GLM_FUNC_QUALIFIER glm::u32vec2 bitfieldDeinterleave(glm::uint64 x)
+	{
+		glm::uint64 REG1(x);
+		glm::uint64 REG2(x >>= 1);
+
+		REG1 = REG1 & static_cast<glm::uint64>(0x5555555555555555ull);
+		REG2 = REG2 & static_cast<glm::uint64>(0x5555555555555555ull);
+
+		REG1 = ((REG1 >>  1) | REG1) & static_cast<glm::uint64>(0x3333333333333333ull);
+		REG2 = ((REG2 >>  1) | REG2) & static_cast<glm::uint64>(0x3333333333333333ull);
+
+		REG1 = ((REG1 >>  2) | REG1) & static_cast<glm::uint64>(0x0F0F0F0F0F0F0F0Full);
+		REG2 = ((REG2 >>  2) | REG2) & static_cast<glm::uint64>(0x0F0F0F0F0F0F0F0Full);
+
+		REG1 = ((REG1 >>  4) | REG1) & static_cast<glm::uint64>(0x00FF00FF00FF00FFull);
+		REG2 = ((REG2 >>  4) | REG2) & static_cast<glm::uint64>(0x00FF00FF00FF00FFull);
+
+		REG1 = ((REG1 >>  8) | REG1) & static_cast<glm::uint64>(0x0000FFFF0000FFFFull);
+		REG2 = ((REG2 >>  8) | REG2) & static_cast<glm::uint64>(0x0000FFFF0000FFFFull);
+
+		REG1 = ((REG1 >> 16) | REG1) & static_cast<glm::uint64>(0x00000000FFFFFFFFull);
+		REG2 = ((REG2 >> 16) | REG2) & static_cast<glm::uint64>(0x00000000FFFFFFFFull);
+
+		return glm::u32vec2(REG1, REG2);
+	}
+
+	GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z)
+	{
+		union sign8
+		{
+			int8 i;
+			uint8 u;
+		} sign_x, sign_y, sign_z;
+
+		union sign32
+		{
+			int32 i;
+			uint32 u;
+		} result;
+
+		sign_x.i = x;
+		sign_y.i = y;
+		sign_z.i = z;
+		result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u);
+
+		return result.i;
+	}
+
+	GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z)
+	{
+		return detail::bitfieldInterleave<uint8, uint32>(x, y, z);
+	}
+
+	GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(u8vec3 const& v)
+	{
+		return detail::bitfieldInterleave<uint8, uint32>(v.x, v.y, v.z);
+	}
+
+	GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z)
+	{
+		union sign16
+		{
+			int16 i;
+			uint16 u;
+		} sign_x, sign_y, sign_z;
+
+		union sign64
+		{
+			int64 i;
+			uint64 u;
+		} result;
+
+		sign_x.i = x;
+		sign_y.i = y;
+		sign_z.i = z;
+		result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u);
+
+		return result.i;
+	}
+
+	GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z)
+	{
+		return detail::bitfieldInterleave<uint16, uint64>(x, y, z);
+	}
+
+	GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u16vec3 const& v)
+	{
+		return detail::bitfieldInterleave<uint16, uint64>(v.x, v.y, v.z);
+	}
+
+	GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y, int32 z)
+	{
+		union sign16
+		{
+			int32 i;
+			uint32 u;
+		} sign_x, sign_y, sign_z;
+
+		union sign64
+		{
+			int64 i;
+			uint64 u;
+		} result;
+
+		sign_x.i = x;
+		sign_y.i = y;
+		sign_z.i = z;
+		result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u);
+
+		return result.i;
+	}
+
+	GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z)
+	{
+		return detail::bitfieldInterleave<uint32, uint64>(x, y, z);
+	}
+
+	GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u32vec3 const& v)
+	{
+		return detail::bitfieldInterleave<uint32, uint64>(v.x, v.y, v.z);
+	}
+
+	GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w)
+	{
+		union sign8
+		{
+			int8 i;
+			uint8 u;
+		} sign_x, sign_y, sign_z, sign_w;
+
+		union sign32
+		{
+			int32 i;
+			uint32 u;
+		} result;
+
+		sign_x.i = x;
+		sign_y.i = y;
+		sign_z.i = z;
+		sign_w.i = w;
+		result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u, sign_w.u);
+
+		return result.i;
+	}
+
+	GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w)
+	{
+		return detail::bitfieldInterleave<uint8, uint32>(x, y, z, w);
+	}
+
+	GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(u8vec4 const& v)
+	{
+		return detail::bitfieldInterleave<uint8, uint32>(v.x, v.y, v.z, v.w);
+	}
+
+	GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w)
+	{
+		union sign16
+		{
+			int16 i;
+			uint16 u;
+		} sign_x, sign_y, sign_z, sign_w;
+
+		union sign64
+		{
+			int64 i;
+			uint64 u;
+		} result;
+
+		sign_x.i = x;
+		sign_y.i = y;
+		sign_z.i = z;
+		sign_w.i = w;
+		result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u, sign_w.u);
+
+		return result.i;
+	}
+
+	GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w)
+	{
+		return detail::bitfieldInterleave<uint16, uint64>(x, y, z, w);
+	}
+
+	GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u16vec4 const& v)
+	{
+		return detail::bitfieldInterleave<uint16, uint64>(v.x, v.y, v.z, v.w);
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/color_space.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/color_space.hpp
new file mode 100644
index 000000000000..cffd9f093fb9
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/color_space.hpp
@@ -0,0 +1,56 @@
+/// @ref gtc_color_space
+/// @file glm/gtc/color_space.hpp
+///
+/// @see core (dependence)
+/// @see gtc_color_space (dependence)
+///
+/// @defgroup gtc_color_space GLM_GTC_color_space
+/// @ingroup gtc
+///
+/// Include <glm/gtc/color_space.hpp> to use the features of this extension.
+///
+/// Allow to perform conversions between linear and sRGB color spaces
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../exponential.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_color_space extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtc_color_space
+	/// @{
+
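+	/// A minimal usage sketch (not from the upstream docs): round-tripping a
+	/// linear color through sRGB encoding with the standard gamma:
+	/// @code
+	/// glm::vec3 linear(0.5f);
+	/// glm::vec3 srgb = glm::convertLinearToSRGB(linear);
+	/// glm::vec3 back = glm::convertSRGBToLinear(srgb); // back is approximately linear
+	/// @endcode
+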
+	/// Convert a linear color to sRGB color using a standard gamma correction.
+	/// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> convertLinearToSRGB(vec<L, T, Q> const& ColorLinear);
+
+	/// Convert a linear color to sRGB color using a custom gamma correction.
+	/// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> convertLinearToSRGB(vec<L, T, Q> const& ColorLinear, T Gamma);
+
+	/// Convert a sRGB color to linear color using a standard gamma correction.
+	/// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> convertSRGBToLinear(vec<L, T, Q> const& ColorSRGB);
+
+	/// Convert a sRGB color to linear color using a custom gamma correction.
+	/// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> convertSRGBToLinear(vec<L, T, Q> const& ColorSRGB, T Gamma);
+
+	/// @}
+} //namespace glm
+
+#include "color_space.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/color_space.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/color_space.inl
new file mode 100644
index 000000000000..2a900044e99b
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/color_space.inl
@@ -0,0 +1,84 @@
+/// @ref gtc_color_space
+
+namespace glm{
+namespace detail
+{
+	template<length_t L, typename T, qualifier Q>
+	struct compute_rgbToSrgb
+	{
+		GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& ColorRGB, T GammaCorrection)
+		{
+			vec<L, T, Q> const ClampedColor(clamp(ColorRGB, static_cast<T>(0), static_cast<T>(1)));
+
+			return mix(
+				pow(ClampedColor, vec<L, T, Q>(GammaCorrection)) * static_cast<T>(1.055) - static_cast<T>(0.055),
+				ClampedColor * static_cast<T>(12.92),
+				lessThan(ClampedColor, vec<L, T, Q>(static_cast<T>(0.0031308))));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_rgbToSrgb<4, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<4, T, Q> const& ColorRGB, T GammaCorrection)
+		{
+			return vec<4, T, Q>(compute_rgbToSrgb<3, T, Q>::call(vec<3, T, Q>(ColorRGB), GammaCorrection), ColorRGB.w);
+		}
+	};
+
+	template<length_t L, typename T, qualifier Q>
+	struct compute_srgbToRgb
+	{
+		GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& ColorSRGB, T Gamma)
+		{
+			return mix(
+				pow((ColorSRGB + static_cast<T>(0.055)) * static_cast<T>(0.94786729857819905213270142180095), vec<L, T, Q>(Gamma)),
+				ColorSRGB * static_cast<T>(0.07739938080495356037151702786378),
+				lessThanEqual(ColorSRGB, vec<L, T, Q>(static_cast<T>(0.04045))));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_srgbToRgb<4, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<4, T, Q> const& ColorSRGB, T Gamma)
+		{
+			return vec<4, T, Q>(compute_srgbToRgb<3, T, Q>::call(vec<3, T, Q>(ColorSRGB), Gamma), ColorSRGB.w);
+		}
+	};
+}//namespace detail
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> convertLinearToSRGB(vec<L, T, Q> const& ColorLinear)
+	{
+		return detail::compute_rgbToSrgb<L, T, Q>::call(ColorLinear, static_cast<T>(0.41666));
+	}
+
+	// Based on Ian Taylor http://chilliant.blogspot.fr/2012/08/srgb-approximations-for-hlsl.html
+	template<>
+	GLM_FUNC_QUALIFIER vec<3, float, lowp> convertLinearToSRGB(vec<3, float, lowp> const& ColorLinear)
+	{
+		vec<3, float, lowp> S1 = sqrt(ColorLinear);
+		vec<3, float, lowp> S2 = sqrt(S1);
+		vec<3, float, lowp> S3 = sqrt(S2);
+		return 0.662002687f * S1 + 0.684122060f * S2 - 0.323583601f * S3 - 0.0225411470f * ColorLinear;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> convertLinearToSRGB(vec<L, T, Q> const& ColorLinear, T Gamma)
+	{
+		return detail::compute_rgbToSrgb<L, T, Q>::call(ColorLinear, static_cast<T>(1) / Gamma);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> convertSRGBToLinear(vec<L, T, Q> const& ColorSRGB)
+	{
+		return detail::compute_srgbToRgb<L, T, Q>::call(ColorSRGB, static_cast<T>(2.4));
+	}
+
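+	// Note on the magic constants in compute_srgbToRgb above (editorial,
+	// easily re-derived): 0.94786729857819905213270142180095 == 1 / 1.055 and
+	// 0.07739938080495356037151702786378 == 1 / 12.92, i.e. the exact
+	// reciprocals of the IEC 61966-2-1 encoding constants used in
+	// compute_rgbToSrgb.
+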
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> convertSRGBToLinear(vec<L, T, Q> const& ColorSRGB, T Gamma)
+	{
+		return detail::compute_srgbToRgb<L, T, Q>::call(ColorSRGB, Gamma);
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/constants.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/constants.hpp
new file mode 100644
index 000000000000..6a1f37d30f95
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/constants.hpp
@@ -0,0 +1,170 @@
+/// @ref gtc_constants
+/// @file glm/gtc/constants.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_constants GLM_GTC_constants
+/// @ingroup gtc
+///
+/// Include <glm/gtc/constants.hpp> to use the features of this extension.
+///
+/// Provide a list of constants and precomputed useful values.
+
+#pragma once
+
+// Dependencies
+#include "../ext/scalar_constants.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_constants extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtc_constants
+	/// @{
+
+	/// Return 0.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType zero();
+
+	/// Return 1.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType one();
+
+	/// Return pi * 2.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType two_pi();
+
+	/// Return unit-circle circumference, or pi * 2.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType tau();
+
+	/// Return square root of pi.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType root_pi();
+
+	/// Return pi / 2.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType half_pi();
+
+	/// Return pi / 2 * 3.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType three_over_two_pi();
+
+	/// Return pi / 4.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType quarter_pi();
+
+	/// Return 1 / pi.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_pi();
+
+	/// Return 1 / (pi * 2).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_two_pi();
+
+	/// Return 2 / pi.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_pi();
+
+	/// Return 4 / pi.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType four_over_pi();
+
+	/// Return 2 / sqrt(pi).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_root_pi();
+
+	/// Return 1 / sqrt(2).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_root_two();
+
+	/// Return sqrt(pi / 2).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType root_half_pi();
+
+	/// Return sqrt(2 * pi).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType root_two_pi();
+
+	/// Return sqrt(ln(4)).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType root_ln_four();
+
+	/// Return e constant.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType e();
+
+	/// Return the Euler-Mascheroni constant.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType euler();
+
+	/// Return sqrt(2).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType root_two();
+
+	/// Return sqrt(3).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType root_three();
+
+	/// Return sqrt(5).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType root_five();
+
+	/// Return ln(2).
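+	///
+	/// A minimal usage sketch (not from the upstream docs):
+	/// @code
+	/// float l2 = glm::ln_two<float>(); // ~0.693147f, i.e. the value of std::log(2.0f)
+	/// @endcode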
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType ln_two();
+
+	/// Return ln(10).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ten();
+
+	/// Return ln(ln(2)).
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ln_two();
+
+	/// Return 1 / 3.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType third();
+
+	/// Return 2 / 3.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType two_thirds();
+
+	/// Return the golden ratio constant.
+	/// @see gtc_constants
+	template<typename genType>
+	GLM_FUNC_DECL GLM_CONSTEXPR genType golden_ratio();
+
+	/// @}
+} //namespace glm
+
+#include "constants.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/constants.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/constants.inl
new file mode 100644
index 000000000000..e9d3776148d0
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/constants.inl
@@ -0,0 +1,173 @@
+/// @ref gtc_constants
+
+namespace glm
+{
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType zero()
+	{
+		return genType(0);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one()
+	{
+		return genType(1);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_pi()
+	{
+		return genType(6.28318530717958647692528676655900576);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType tau()
+	{
+		return two_pi<genType>();
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_pi()
+	{
+		return genType(1.772453850905516027);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType half_pi()
+	{
+		return genType(1.57079632679489661923132169163975144);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType three_over_two_pi()
+	{
+		return genType(4.71238898038468985769396507491925432);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType quarter_pi()
+	{
+		return genType(0.785398163397448309615660845819875721);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_pi()
+	{
+		return genType(0.318309886183790671537767526745028724);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_two_pi()
+	{
+		return genType(0.159154943091895335768883763372514362);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_over_pi()
+	{
+		return genType(0.636619772367581343075535053490057448);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType four_over_pi()
+	{
+		return genType(1.273239544735162686151070106980114898);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_over_root_pi()
+	{
+		return genType(1.12837916709551257389615890312154517);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_root_two()
+	{
+		return genType(0.707106781186547524400844362104849039);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_half_pi()
+	{
+		return genType(1.253314137315500251);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_two_pi()
+	{
+		return genType(2.506628274631000502);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_ln_four()
+	{
+		return genType(1.17741002251547469);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType e()
+	{
+		return genType(2.71828182845904523536);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType euler()
+	{
+		return genType(0.577215664901532860606);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_two()
+	{
+		return genType(1.41421356237309504880168872420969808);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_three()
+	{
+		return genType(1.73205080756887729352744634150587236);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_five()
+	{
+		return genType(2.23606797749978969640917366873127623);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_two()
+	{
+		return genType(0.693147180559945309417232121458176568);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_ten()
+	{
+		return genType(2.30258509299404568401799145468436421);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_ln_two()
+	{
+		return genType(-0.3665129205816643);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType third()
+	{
+		return genType(0.3333333333333333333333333333333333333333);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_thirds()
+	{
+		return genType(0.666666666666666666666666666666666666667);
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType golden_ratio()
+	{
+		return genType(1.61803398874989484820458683436563811);
+	}
+
+} //namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/epsilon.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/epsilon.hpp
new file mode 100644
index 000000000000..640439b11c36
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/epsilon.hpp
@@ -0,0 +1,60 @@
+/// @ref gtc_epsilon
+/// @file glm/gtc/epsilon.hpp
+///
+/// @see core (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtc_epsilon GLM_GTC_epsilon
+/// @ingroup gtc
+///
+/// Include <glm/gtc/epsilon.hpp> to use the features of this extension.
+///
+/// Comparison functions for a user-defined epsilon value.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_epsilon extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtc_epsilon
+	/// @{
+
+	/// Returns the component-wise comparison of |x - y| < epsilon.
+	/// True if this expression is satisfied.
+	///
+	/// @see gtc_epsilon
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, bool, Q> epsilonEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon);
+
+	/// Returns the comparison of |x - y| < epsilon.
+	/// True if this expression is satisfied.
+	///
+	/// @see gtc_epsilon
+	template<typename genType>
+	GLM_FUNC_DECL bool epsilonEqual(genType const& x, genType const& y, genType const& epsilon);
+
+	/// Returns the component-wise comparison of |x - y| >= epsilon.
+	/// True if this expression is satisfied.
+	///
+	/// @see gtc_epsilon
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, bool, Q> epsilonNotEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon);
+
+	/// Returns the comparison of |x - y| >= epsilon.
+	/// True if this expression is satisfied.
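+	///
+	/// A minimal usage sketch (not from the upstream docs):
+	/// @code
+	/// bool changed = glm::epsilonNotEqual(0.1f, 0.1000001f, 1e-9f); // true, the difference exceeds 1e-9
+	/// @endcode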
+	///
+	/// @see gtc_epsilon
+	template<typename genType>
+	GLM_FUNC_DECL bool epsilonNotEqual(genType const& x, genType const& y, genType const& epsilon);
+
+	/// @}
+}//namespace glm
+
+#include "epsilon.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/epsilon.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/epsilon.inl
new file mode 100644
index 000000000000..508b9f8966fe
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/epsilon.inl
@@ -0,0 +1,80 @@
+/// @ref gtc_epsilon
+
+// Dependency:
+#include "../vector_relational.hpp"
+#include "../common.hpp"
+
+namespace glm
+{
+	template<>
+	GLM_FUNC_QUALIFIER bool epsilonEqual
+	(
+		float const& x,
+		float const& y,
+		float const& epsilon
+	)
+	{
+		return abs(x - y) < epsilon;
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER bool epsilonEqual
+	(
+		double const& x,
+		double const& y,
+		double const& epsilon
+	)
+	{
+		return abs(x - y) < epsilon;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon)
+	{
+		return lessThan(abs(x - y), vec<L, T, Q>(epsilon));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& epsilon)
+	{
+		return lessThan(abs(x - y), vec<L, T, Q>(epsilon));
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER bool epsilonNotEqual(float const& x, float const& y, float const& epsilon)
+	{
+		return abs(x - y) >= epsilon;
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER bool epsilonNotEqual(double const& x, double const& y, double const& epsilon)
+	{
+		return abs(x - y) >= epsilon;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonNotEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon)
+	{
+		return greaterThanEqual(abs(x - y), vec<L, T, Q>(epsilon));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonNotEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& epsilon)
+	{
+		return greaterThanEqual(abs(x - y), vec<L, T, Q>(epsilon));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, bool, Q> epsilonEqual(qua<T, Q> const& x, qua<T, Q> const& y, T const& epsilon)
+	{
+		vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w);
+		return lessThan(abs(v), vec<4, T, Q>(epsilon));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, bool, Q> epsilonNotEqual(qua<T, Q> const& x, qua<T, Q> const& y, T const& epsilon)
+	{
+		vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w);
+		return greaterThanEqual(abs(v), vec<4, T, Q>(epsilon));
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/integer.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/integer.hpp
new file mode 100644
index 000000000000..ee52e0e0c154
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/integer.hpp
@@ -0,0 +1,43 @@
+/// @ref gtc_integer
+/// @file glm/gtc/integer.hpp
+///
+/// @see core (dependence)
+/// @see gtc_integer (dependence)
+///
+/// @defgroup gtc_integer GLM_GTC_integer
+/// @ingroup gtc
+///
+/// Include <glm/gtc/integer.hpp> to use the features of this extension.
+///
+/// @brief Allow to compute functions such as log2 on integer values
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../common.hpp"
+#include "../integer.hpp"
+#include "../exponential.hpp"
+#include "../ext/scalar_common.hpp"
+#include "../ext/vector_common.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_integer extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtc_integer
+	/// @{
+
+	/// Returns the log2 of x for integer values. Useful to compute mipmap count from the texture size.
+	/// @see gtc_integer
+	template<typename genIUType>
+	GLM_FUNC_DECL genIUType log2(genIUType x);
+
+	/// @}
+} //namespace glm
+
+#include "integer.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/integer.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/integer.inl
new file mode 100644
index 000000000000..5f66dfe2c096
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/integer.inl
@@ -0,0 +1,33 @@
+/// @ref gtc_integer
+
+namespace glm{
+namespace detail
+{
+	template<length_t L, typename T, qualifier Q, bool Aligned>
+	struct compute_log2<L, T, Q, false, Aligned>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v)
+		{
+			//Equivalent to return findMSB(vec<L, T, Q>(v)); but save one function call in ASM with VC
+			//return findMSB(vec<L, T, Q>(v));
+			return vec<L, T, Q>(detail::compute_findMSB_vec<L, T, Q, sizeof(T) * 8>::call(v));
+		}
+	};
+
+#	if GLM_HAS_BITSCAN_WINDOWS
+	template<qualifier Q, bool Aligned>
+	struct compute_log2<4, int, Q, false, Aligned>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v)
+		{
+			vec<4, int, Q> Result;
+			_BitScanReverse(reinterpret_cast<unsigned long*>(&Result.x), v.x);
+			_BitScanReverse(reinterpret_cast<unsigned long*>(&Result.y), v.y);
+			_BitScanReverse(reinterpret_cast<unsigned long*>(&Result.z), v.z);
+			_BitScanReverse(reinterpret_cast<unsigned long*>(&Result.w), v.w);
+			return Result;
+		}
+	};
+#	endif//GLM_HAS_BITSCAN_WINDOWS
+}//namespace detail
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_access.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_access.hpp
new file mode 100644
index 000000000000..4935ba755dd5
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_access.hpp
@@ -0,0 +1,60 @@
+/// @ref gtc_matrix_access
+/// @file glm/gtc/matrix_access.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_matrix_access GLM_GTC_matrix_access
+/// @ingroup gtc
+///
+/// Include <glm/gtc/matrix_access.hpp> to use the features of this extension.
+///
+/// Defines functions to access rows or columns of a matrix easily.
+
+#pragma once
+
+// Dependency:
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_matrix_access extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtc_matrix_access
+	/// @{
+
+	/// Get a specific row of a matrix.
+	/// @see gtc_matrix_access
+	template<typename genType>
+	GLM_FUNC_DECL typename genType::row_type row(
+		genType const& m,
+		length_t index);
+
+	/// Set a specific row of a matrix.
+	/// @see gtc_matrix_access
+	template<typename genType>
+	GLM_FUNC_DECL genType row(
+		genType const& m,
+		length_t index,
+		typename genType::row_type const& x);
+
+	/// Get a specific column of a matrix.
+	/// @see gtc_matrix_access
+	template<typename genType>
+	GLM_FUNC_DECL typename genType::col_type column(
+		genType const& m,
+		length_t index);
+
+	/// Set a specific column of a matrix.
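+	///
+	/// A minimal usage sketch (not from the upstream docs):
+	/// @code
+	/// glm::mat4 m(1.0f);
+	/// glm::vec4 r = glm::row(m, 1);                // read row 1: (0, 1, 0, 0)
+	/// m = glm::column(m, 3, glm::vec4(1.0f));      // overwrite column 3
+	/// @endcode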
+	/// @see gtc_matrix_access
+	template<typename genType>
+	GLM_FUNC_DECL genType column(
+		genType const& m,
+		length_t index,
+		typename genType::col_type const& x);
+
+	/// @}
+}//namespace glm
+
+#include "matrix_access.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_access.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_access.inl
new file mode 100644
index 000000000000..09fcc10d3d7e
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_access.inl
@@ -0,0 +1,62 @@
+/// @ref gtc_matrix_access
+
+namespace glm
+{
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType row
+	(
+		genType const& m,
+		length_t index,
+		typename genType::row_type const& x
+	)
+	{
+		assert(index >= 0 && index < m[0].length());
+
+		genType Result = m;
+		for(length_t i = 0; i < m.length(); ++i)
+			Result[i][index] = x[i];
+		return Result;
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER typename genType::row_type row
+	(
+		genType const& m,
+		length_t index
+	)
+	{
+		assert(index >= 0 && index < m[0].length());
+
+		typename genType::row_type Result(0);
+		for(length_t i = 0; i < m.length(); ++i)
+			Result[i] = m[i][index];
+		return Result;
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType column
+	(
+		genType const& m,
+		length_t index,
+		typename genType::col_type const& x
+	)
+	{
+		assert(index >= 0 && index < m.length());
+
+		genType Result = m;
+		Result[index] = x;
+		return Result;
+	}
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER typename genType::col_type column
+	(
+		genType const& m,
+		length_t index
+	)
+	{
+		assert(index >= 0 && index < m.length());
+
+		return m[index];
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_integer.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_integer.hpp
new file mode 100644
index 000000000000..d7ebdc719221
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_integer.hpp
@@ -0,0 +1,433 @@
+/// @ref gtc_matrix_integer
+/// @file glm/gtc/matrix_integer.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_matrix_integer GLM_GTC_matrix_integer
+/// @ingroup gtc
+///
+/// Include <glm/gtc/matrix_integer.hpp> to use the features of this extension.
+///
+/// Defines a number of matrices with integer types.
+
+#pragma once
+
+// Dependency:
+#include "../mat2x2.hpp"
+#include "../mat2x3.hpp"
+#include "../mat2x4.hpp"
+#include "../mat3x2.hpp"
+#include "../mat3x3.hpp"
+#include "../mat3x4.hpp"
+#include "../mat4x2.hpp"
+#include "../mat4x3.hpp"
+#include "../mat4x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_matrix_integer extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtc_matrix_integer
+	/// @{
+
+	/// High-qualifier signed integer 2x2 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<2, 2, int, highp> highp_imat2;
+
+	/// High-qualifier signed integer 3x3 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<3, 3, int, highp> highp_imat3;
+
+	/// High-qualifier signed integer 4x4 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<4, 4, int, highp> highp_imat4;
+
+	/// High-qualifier signed integer 2x2 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<2, 2, int, highp> highp_imat2x2;
+
+	/// High-qualifier signed integer 2x3 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<2, 3, int, highp> highp_imat2x3;
+
+	/// High-qualifier signed integer 2x4 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<2, 4, int, highp> highp_imat2x4;
+
+	/// High-qualifier signed integer 3x2 matrix.
+ /// @see gtc_matrix_integer + typedef mat<3, 2, int, highp> highp_imat3x2; + + /// High-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, highp> highp_imat3x3; + + /// High-qualifier signed integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, int, highp> highp_imat3x4; + + /// High-qualifier signed integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, int, highp> highp_imat4x2; + + /// High-qualifier signed integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, int, highp> highp_imat4x3; + + /// High-qualifier signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, highp> highp_imat4x4; + + + /// Medium-qualifier signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, mediump> mediump_imat2; + + /// Medium-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, mediump> mediump_imat3; + + /// Medium-qualifier signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, mediump> mediump_imat4; + + + /// Medium-qualifier signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, mediump> mediump_imat2x2; + + /// Medium-qualifier signed integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, int, mediump> mediump_imat2x3; + + /// Medium-qualifier signed integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, int, mediump> mediump_imat2x4; + + /// Medium-qualifier signed integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, int, mediump> mediump_imat3x2; + + /// Medium-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, mediump> mediump_imat3x3; + + /// Medium-qualifier signed integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, int, mediump> mediump_imat3x4; + + /// Medium-qualifier signed integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, int, mediump> mediump_imat4x2; + + /// Medium-qualifier signed integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, int, mediump> mediump_imat4x3; + + /// Medium-qualifier signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, mediump> mediump_imat4x4; + + + /// Low-qualifier signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, lowp> lowp_imat2; + + /// Low-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, lowp> lowp_imat3; + + /// Low-qualifier signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, lowp> lowp_imat4; + + + /// Low-qualifier signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, lowp> lowp_imat2x2; + + /// Low-qualifier signed integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, int, lowp> lowp_imat2x3; + + /// Low-qualifier signed integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, int, lowp> lowp_imat2x4; + + /// Low-qualifier signed integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, int, lowp> lowp_imat3x2; + + /// Low-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, lowp> lowp_imat3x3; + + /// Low-qualifier signed integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, int, lowp> lowp_imat3x4; + + /// Low-qualifier signed integer 4x2 matrix. 
+ /// @see gtc_matrix_integer + typedef mat<4, 2, int, lowp> lowp_imat4x2; + + /// Low-qualifier signed integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, int, lowp> lowp_imat4x3; + + /// Low-qualifier signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, lowp> lowp_imat4x4; + + + /// High-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, highp> highp_umat2; + + /// High-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, highp> highp_umat3; + + /// High-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, highp> highp_umat4; + + /// High-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, highp> highp_umat2x2; + + /// High-qualifier unsigned integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, uint, highp> highp_umat2x3; + + /// High-qualifier unsigned integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, uint, highp> highp_umat2x4; + + /// High-qualifier unsigned integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, uint, highp> highp_umat3x2; + + /// High-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, highp> highp_umat3x3; + + /// High-qualifier unsigned integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, uint, highp> highp_umat3x4; + + /// High-qualifier unsigned integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, uint, highp> highp_umat4x2; + + /// High-qualifier unsigned integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, uint, highp> highp_umat4x3; + + /// High-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, highp> highp_umat4x4; + + + /// Medium-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, mediump> mediump_umat2; + + /// Medium-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, mediump> mediump_umat3; + + /// Medium-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, mediump> mediump_umat4; + + + /// Medium-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, mediump> mediump_umat2x2; + + /// Medium-qualifier unsigned integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, uint, mediump> mediump_umat2x3; + + /// Medium-qualifier unsigned integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, uint, mediump> mediump_umat2x4; + + /// Medium-qualifier unsigned integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, uint, mediump> mediump_umat3x2; + + /// Medium-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, mediump> mediump_umat3x3; + + /// Medium-qualifier unsigned integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, uint, mediump> mediump_umat3x4; + + /// Medium-qualifier unsigned integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, uint, mediump> mediump_umat4x2; + + /// Medium-qualifier unsigned integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, uint, mediump> mediump_umat4x3; + + /// Medium-qualifier unsigned integer 4x4 matrix. 
+ /// @see gtc_matrix_integer + typedef mat<4, 4, uint, mediump> mediump_umat4x4; + + + /// Low-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, lowp> lowp_umat2; + + /// Low-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, lowp> lowp_umat3; + + /// Low-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, lowp> lowp_umat4; + + + /// Low-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, lowp> lowp_umat2x2; + + /// Low-qualifier unsigned integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, uint, lowp> lowp_umat2x3; + + /// Low-qualifier unsigned integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, uint, lowp> lowp_umat2x4; + + /// Low-qualifier unsigned integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, uint, lowp> lowp_umat3x2; + + /// Low-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, lowp> lowp_umat3x3; + + /// Low-qualifier unsigned integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, uint, lowp> lowp_umat3x4; + + /// Low-qualifier unsigned integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, uint, lowp> lowp_umat4x2; + + /// Low-qualifier unsigned integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, uint, lowp> lowp_umat4x3; + + /// Low-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, lowp> lowp_umat4x4; + + + + /// Signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, defaultp> imat2; + + /// Signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, defaultp> imat3; + + /// Signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, defaultp> imat4; + + /// Signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, defaultp> imat2x2; + + /// Signed integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, int, defaultp> imat2x3; + + /// Signed integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, int, defaultp> imat2x4; + + /// Signed integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, int, defaultp> imat3x2; + + /// Signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, defaultp> imat3x3; + + /// Signed integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, int, defaultp> imat3x4; + + /// Signed integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, int, defaultp> imat4x2; + + /// Signed integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, int, defaultp> imat4x3; + + /// Signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, defaultp> imat4x4; + + + + /// Unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, defaultp> umat2; + + /// Unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, defaultp> umat3; + + /// Unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, defaultp> umat4; + + /// Unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, defaultp> umat2x2; + + /// Unsigned integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, uint, defaultp> umat2x3; + + /// Unsigned integer 2x4 matrix. 
+	/// @see gtc_matrix_integer
+	typedef mat<2, 4, uint, defaultp> umat2x4;
+
+	/// Unsigned integer 3x2 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<3, 2, uint, defaultp> umat3x2;
+
+	/// Unsigned integer 3x3 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<3, 3, uint, defaultp> umat3x3;
+
+	/// Unsigned integer 3x4 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<3, 4, uint, defaultp> umat3x4;
+
+	/// Unsigned integer 4x2 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<4, 2, uint, defaultp> umat4x2;
+
+	/// Unsigned integer 4x3 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<4, 3, uint, defaultp> umat4x3;
+
+	/// Unsigned integer 4x4 matrix.
+	/// @see gtc_matrix_integer
+	typedef mat<4, 4, uint, defaultp> umat4x4;
+
+	/// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_inverse.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_inverse.hpp
new file mode 100644
index 000000000000..75d53f20234d
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_inverse.hpp
@@ -0,0 +1,50 @@
+/// @ref gtc_matrix_inverse
+/// @file glm/gtc/matrix_inverse.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_matrix_inverse GLM_GTC_matrix_inverse
+/// @ingroup gtc
+///
+/// Include <glm/gtc/matrix_inverse.hpp> to use the features of this extension.
+///
+/// Defines additional matrix inverting functions.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../matrix.hpp"
+#include "../mat2x2.hpp"
+#include "../mat3x3.hpp"
+#include "../mat4x4.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_matrix_inverse extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtc_matrix_inverse
+	/// @{
+
+	/// Fast matrix inverse for affine matrix.
+	///
+	/// @param m Input matrix to invert.
+	/// @tparam genType Square floating-point matrix: half, float or double. Inverse of matrix based on half-qualifier floating point value is highly inaccurate.
+	/// @see gtc_matrix_inverse
+	template<typename genType>
+	GLM_FUNC_DECL genType affineInverse(genType const& m);
+
+	/// Compute the inverse transpose of a matrix.
+	///
+	/// @param m Input matrix to invert transpose.
+	/// @tparam genType Square floating-point matrix: half, float or double. Inverse of matrix based on half-qualifier floating point value is highly inaccurate.
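+	///
+	/// A minimal usage sketch (not from the upstream docs); a common use is
+	/// deriving a normal matrix, where modelView is assumed to be a glm::mat4:
+	/// @code
+	/// glm::mat3 normalMatrix = glm::inverseTranspose(glm::mat3(modelView));
+	/// @endcode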
+ /// @see gtc_matrix_inverse + template + GLM_FUNC_DECL genType inverseTranspose(genType const& m); + + /// @} +}//namespace glm + +#include "matrix_inverse.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_inverse.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_inverse.inl new file mode 100644 index 000000000000..c004b9e14670 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_inverse.inl @@ -0,0 +1,118 @@ +/// @ref gtc_matrix_inverse + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> affineInverse(mat<3, 3, T, Q> const& m) + { + mat<2, 2, T, Q> const Inv(inverse(mat<2, 2, T, Q>(m))); + + return mat<3, 3, T, Q>( + vec<3, T, Q>(Inv[0], static_cast(0)), + vec<3, T, Q>(Inv[1], static_cast(0)), + vec<3, T, Q>(-Inv * vec<2, T, Q>(m[2]), static_cast(1))); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> affineInverse(mat<4, 4, T, Q> const& m) + { + mat<3, 3, T, Q> const Inv(inverse(mat<3, 3, T, Q>(m))); + + return mat<4, 4, T, Q>( + vec<4, T, Q>(Inv[0], static_cast(0)), + vec<4, T, Q>(Inv[1], static_cast(0)), + vec<4, T, Q>(Inv[2], static_cast(0)), + vec<4, T, Q>(-Inv * vec<3, T, Q>(m[3]), static_cast(1))); + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> inverseTranspose(mat<2, 2, T, Q> const& m) + { + T Determinant = m[0][0] * m[1][1] - m[1][0] * m[0][1]; + + mat<2, 2, T, Q> Inverse( + + m[1][1] / Determinant, + - m[0][1] / Determinant, + - m[1][0] / Determinant, + + m[0][0] / Determinant); + + return Inverse; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> inverseTranspose(mat<3, 3, T, Q> const& m) + { + T Determinant = + + m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1]) + - m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0]) + + m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]); + + mat<3, 3, T, Q> Inverse; + Inverse[0][0] = + (m[1][1] * m[2][2] - m[2][1] * m[1][2]); + Inverse[0][1] = - (m[1][0] * m[2][2] - m[2][0] * m[1][2]); + Inverse[0][2] = + (m[1][0] * m[2][1] - m[2][0] * m[1][1]); + Inverse[1][0] = - (m[0][1] * m[2][2] - m[2][1] * m[0][2]); + Inverse[1][1] = + (m[0][0] * m[2][2] - m[2][0] * m[0][2]); + Inverse[1][2] = - (m[0][0] * m[2][1] - m[2][0] * m[0][1]); + Inverse[2][0] = + (m[0][1] * m[1][2] - m[1][1] * m[0][2]); + Inverse[2][1] = - (m[0][0] * m[1][2] - m[1][0] * m[0][2]); + Inverse[2][2] = + (m[0][0] * m[1][1] - m[1][0] * m[0][1]); + Inverse /= Determinant; + + return Inverse; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> inverseTranspose(mat<4, 4, T, Q> const& m) + { + T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + T SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + T SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + T SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + T SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + T SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + T SubFactor11 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + T SubFactor12 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + T SubFactor13 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + T SubFactor14 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + T SubFactor15 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + T SubFactor16 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + T SubFactor17 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + mat<4, 4, T, 
Q> Inverse; + Inverse[0][0] = + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02); + Inverse[0][1] = - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04); + Inverse[0][2] = + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05); + Inverse[0][3] = - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05); + + Inverse[1][0] = - (m[0][1] * SubFactor00 - m[0][2] * SubFactor01 + m[0][3] * SubFactor02); + Inverse[1][1] = + (m[0][0] * SubFactor00 - m[0][2] * SubFactor03 + m[0][3] * SubFactor04); + Inverse[1][2] = - (m[0][0] * SubFactor01 - m[0][1] * SubFactor03 + m[0][3] * SubFactor05); + Inverse[1][3] = + (m[0][0] * SubFactor02 - m[0][1] * SubFactor04 + m[0][2] * SubFactor05); + + Inverse[2][0] = + (m[0][1] * SubFactor06 - m[0][2] * SubFactor07 + m[0][3] * SubFactor08); + Inverse[2][1] = - (m[0][0] * SubFactor06 - m[0][2] * SubFactor09 + m[0][3] * SubFactor10); + Inverse[2][2] = + (m[0][0] * SubFactor07 - m[0][1] * SubFactor09 + m[0][3] * SubFactor11); + Inverse[2][3] = - (m[0][0] * SubFactor08 - m[0][1] * SubFactor10 + m[0][2] * SubFactor11); + + Inverse[3][0] = - (m[0][1] * SubFactor12 - m[0][2] * SubFactor13 + m[0][3] * SubFactor14); + Inverse[3][1] = + (m[0][0] * SubFactor12 - m[0][2] * SubFactor15 + m[0][3] * SubFactor16); + Inverse[3][2] = - (m[0][0] * SubFactor13 - m[0][1] * SubFactor15 + m[0][3] * SubFactor17); + Inverse[3][3] = + (m[0][0] * SubFactor14 - m[0][1] * SubFactor16 + m[0][2] * SubFactor17); + + T Determinant = + + m[0][0] * Inverse[0][0] + + m[0][1] * Inverse[0][1] + + m[0][2] * Inverse[0][2] + + m[0][3] * Inverse[0][3]; + + Inverse /= Determinant; + + return Inverse; + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_transform.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_transform.hpp new file mode 100644 index 000000000000..612418fa51c4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_transform.hpp @@ -0,0 +1,36 @@ +/// @ref gtc_matrix_transform +/// @file glm/gtc/matrix_transform.hpp +/// +/// @see core (dependence) +/// @see gtx_transform +/// @see gtx_transform2 +/// +/// @defgroup gtc_matrix_transform GLM_GTC_matrix_transform +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Defines functions that generate common transformation matrices. +/// +/// The matrices generated by this extension use standard OpenGL fixed-function +/// conventions. For example, the lookAt function generates a transform from world +/// space into the specific eye space that the projective matrix functions +/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility +/// specifications defines the particular layout of this eye space. 
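Before moving on, a short usage sketch for the gtc_matrix_inverse extension added above. This is not part of the imported GLM sources; it assumes only the standard glm include paths and the two functions declared in that header:

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_inverse.hpp>

    // affineInverse() inverts only the upper-left linear block and rebuilds the
    // translation column, so it is cheaper than the general inverse(), but it is
    // valid only for affine matrices (bottom row equal to (0, 0, 0, 1) for a mat4).
    glm::mat4 invertModel(glm::mat4 const& model) {
        return glm::affineInverse(model);
    }

    // inverseTranspose() is the classic "normal matrix": it keeps normals
    // perpendicular to surfaces even under non-uniform scaling.
    glm::mat3 normalMatrix(glm::mat4 const& modelView) {
        return glm::inverseTranspose(glm::mat3(modelView));
    }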
+ +#pragma once + +// Dependencies +#include "../mat4x4.hpp" +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../ext/matrix_projection.hpp" +#include "../ext/matrix_clip_space.hpp" +#include "../ext/matrix_transform.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_matrix_transform extension included") +#endif + +#include "matrix_transform.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_transform.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_transform.inl new file mode 100644 index 000000000000..15b46bc9db61 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/matrix_transform.inl @@ -0,0 +1,3 @@ +#include "../geometric.hpp" +#include "../trigonometric.hpp" +#include "../matrix.hpp" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/noise.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/noise.hpp new file mode 100644 index 000000000000..ab1772e78125 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/noise.hpp @@ -0,0 +1,61 @@ +/// @ref gtc_noise +/// @file glm/gtc/noise.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_noise GLM_GTC_noise +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Defines 2D, 3D and 4D procedural noise functions +/// Based on the work of Stefan Gustavson and Ashima Arts on "webgl-noise": +/// https://github.com/ashima/webgl-noise +/// Following Stefan Gustavson's paper "Simplex noise demystified": +/// http://www.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/_noise.hpp" +#include "../geometric.hpp" +#include "../common.hpp" +#include "../vector_relational.hpp" +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_noise extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_noise + /// @{ + + /// Classic perlin noise. + /// @see gtc_noise + template + GLM_FUNC_DECL T perlin( + vec const& p); + + /// Periodic perlin noise. + /// @see gtc_noise + template + GLM_FUNC_DECL T perlin( + vec const& p, + vec const& rep); + + /// Simplex noise. 
+ /// @see gtc_noise + template + GLM_FUNC_DECL T simplex( + vec const& p); + + /// @} +}//namespace glm + +#include "noise.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/noise.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/noise.inl new file mode 100644 index 000000000000..a1cf399d38d4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/noise.inl @@ -0,0 +1,807 @@ +/// @ref gtc_noise +/// +// Based on the work of Stefan Gustavson and Ashima Arts on "webgl-noise": +// https://github.com/ashima/webgl-noise +// Following Stefan Gustavson's paper "Simplex noise demystified": +// http://www.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf + +namespace glm{ +namespace detail +{ + template + GLM_FUNC_QUALIFIER vec<4, T, Q> grad4(T const& j, vec<4, T, Q> const& ip) + { + vec<3, T, Q> pXYZ = floor(fract(vec<3, T, Q>(j) * vec<3, T, Q>(ip)) * T(7)) * ip[2] - T(1); + T pW = static_cast(1.5) - dot(abs(pXYZ), vec<3, T, Q>(1)); + vec<4, T, Q> s = vec<4, T, Q>(lessThan(vec<4, T, Q>(pXYZ, pW), vec<4, T, Q>(0.0))); + pXYZ = pXYZ + (vec<3, T, Q>(s) * T(2) - T(1)) * s.w; + return vec<4, T, Q>(pXYZ, pW); + } +}//namespace detail + + // Classic Perlin noise + template + GLM_FUNC_QUALIFIER T perlin(vec<2, T, Q> const& Position) + { + vec<4, T, Q> Pi = glm::floor(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) + vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); + vec<4, T, Q> Pf = glm::fract(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) - vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); + Pi = mod(Pi, vec<4, T, Q>(289)); // To avoid truncation effects in permutation + vec<4, T, Q> ix(Pi.x, Pi.z, Pi.x, Pi.z); + vec<4, T, Q> iy(Pi.y, Pi.y, Pi.w, Pi.w); + vec<4, T, Q> fx(Pf.x, Pf.z, Pf.x, Pf.z); + vec<4, T, Q> fy(Pf.y, Pf.y, Pf.w, Pf.w); + + vec<4, T, Q> i = detail::permute(detail::permute(ix) + iy); + + vec<4, T, Q> gx = static_cast(2) * glm::fract(i / T(41)) - T(1); + vec<4, T, Q> gy = glm::abs(gx) - T(0.5); + vec<4, T, Q> tx = glm::floor(gx + T(0.5)); + gx = gx - tx; + + vec<2, T, Q> g00(gx.x, gy.x); + vec<2, T, Q> g10(gx.y, gy.y); + vec<2, T, Q> g01(gx.z, gy.z); + vec<2, T, Q> g11(gx.w, gy.w); + + vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11))); + g00 *= norm.x; + g01 *= norm.y; + g10 *= norm.z; + g11 *= norm.w; + + T n00 = dot(g00, vec<2, T, Q>(fx.x, fy.x)); + T n10 = dot(g10, vec<2, T, Q>(fx.y, fy.y)); + T n01 = dot(g01, vec<2, T, Q>(fx.z, fy.z)); + T n11 = dot(g11, vec<2, T, Q>(fx.w, fy.w)); + + vec<2, T, Q> fade_xy = detail::fade(vec<2, T, Q>(Pf.x, Pf.y)); + vec<2, T, Q> n_x = mix(vec<2, T, Q>(n00, n01), vec<2, T, Q>(n10, n11), fade_xy.x); + T n_xy = mix(n_x.x, n_x.y, fade_xy.y); + return T(2.3) * n_xy; + } + + // Classic Perlin noise + template + GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& Position) + { + vec<3, T, Q> Pi0 = floor(Position); // Integer part for indexing + vec<3, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 + Pi0 = detail::mod289(Pi0); + Pi1 = detail::mod289(Pi1); + vec<3, T, Q> Pf0 = fract(Position); // Fractional part for interpolation + vec<3, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 + vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); + vec<4, T, Q> iy = vec<4, T, Q>(vec<2, T, Q>(Pi0.y), vec<2, T, Q>(Pi1.y)); + vec<4, T, Q> iz0(Pi0.z); + vec<4, T, Q> iz1(Pi1.z); + + vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); + vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); + vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); + + vec<4, T, Q> gx0 = ixy0 * T(1.0 / 7.0); + vec<4, T, Q> gy0 = 
fract(floor(gx0) * T(1.0 / 7.0)) - T(0.5); + gx0 = fract(gx0); + vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); + vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0.0)); + gx0 -= sz0 * (step(T(0), gx0) - T(0.5)); + gy0 -= sz0 * (step(T(0), gy0) - T(0.5)); + + vec<4, T, Q> gx1 = ixy1 * T(1.0 / 7.0); + vec<4, T, Q> gy1 = fract(floor(gx1) * T(1.0 / 7.0)) - T(0.5); + gx1 = fract(gx1); + vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); + vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(0.0)); + gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); + gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); + + vec<3, T, Q> g000(gx0.x, gy0.x, gz0.x); + vec<3, T, Q> g100(gx0.y, gy0.y, gz0.y); + vec<3, T, Q> g010(gx0.z, gy0.z, gz0.z); + vec<3, T, Q> g110(gx0.w, gy0.w, gz0.w); + vec<3, T, Q> g001(gx1.x, gy1.x, gz1.x); + vec<3, T, Q> g101(gx1.y, gy1.y, gz1.y); + vec<3, T, Q> g011(gx1.z, gy1.z, gz1.z); + vec<3, T, Q> g111(gx1.w, gy1.w, gz1.w); + + vec<4, T, Q> norm0 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); + g000 *= norm0.x; + g010 *= norm0.y; + g100 *= norm0.z; + g110 *= norm0.w; + vec<4, T, Q> norm1 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); + g001 *= norm1.x; + g011 *= norm1.y; + g101 *= norm1.z; + g111 *= norm1.w; + + T n000 = dot(g000, Pf0); + T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); + T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); + T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); + T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); + T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); + T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); + T n111 = dot(g111, Pf1); + + vec<3, T, Q> fade_xyz = detail::fade(Pf0); + vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); + vec<2, T, Q> n_yz = mix(vec<2, T, Q>(n_z.x, n_z.y), vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); + T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); + return T(2.2) * n_xyz; + } + /* + // Classic Perlin noise + template + GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& P) + { + vec<3, T, Q> Pi0 = floor(P); // Integer part for indexing + vec<3, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 + Pi0 = mod(Pi0, T(289)); + Pi1 = mod(Pi1, T(289)); + vec<3, T, Q> Pf0 = fract(P); // Fractional part for interpolation + vec<3, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 + vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); + vec<4, T, Q> iy(Pi0.y, Pi0.y, Pi1.y, Pi1.y); + vec<4, T, Q> iz0(Pi0.z); + vec<4, T, Q> iz1(Pi1.z); + + vec<4, T, Q> ixy = permute(permute(ix) + iy); + vec<4, T, Q> ixy0 = permute(ixy + iz0); + vec<4, T, Q> ixy1 = permute(ixy + iz1); + + vec<4, T, Q> gx0 = ixy0 / T(7); + vec<4, T, Q> gy0 = fract(floor(gx0) / T(7)) - T(0.5); + gx0 = fract(gx0); + vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); + vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0.0)); + gx0 -= sz0 * (step(0.0, gx0) - T(0.5)); + gy0 -= sz0 * (step(0.0, gy0) - T(0.5)); + + vec<4, T, Q> gx1 = ixy1 / T(7); + vec<4, T, Q> gy1 = fract(floor(gx1) / T(7)) - T(0.5); + gx1 = fract(gx1); + vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); + vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(0.0)); + gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); + gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); + + vec<3, T, Q> g000(gx0.x, gy0.x, gz0.x); + vec<3, T, Q> g100(gx0.y, gy0.y, gz0.y); + vec<3, T, Q> g010(gx0.z, gy0.z, gz0.z); + vec<3, T, Q> g110(gx0.w, gy0.w, gz0.w); + vec<3, T, Q> g001(gx1.x, 
gy1.x, gz1.x); + vec<3, T, Q> g101(gx1.y, gy1.y, gz1.y); + vec<3, T, Q> g011(gx1.z, gy1.z, gz1.z); + vec<3, T, Q> g111(gx1.w, gy1.w, gz1.w); + + vec<4, T, Q> norm0 = taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); + g000 *= norm0.x; + g010 *= norm0.y; + g100 *= norm0.z; + g110 *= norm0.w; + vec<4, T, Q> norm1 = taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); + g001 *= norm1.x; + g011 *= norm1.y; + g101 *= norm1.z; + g111 *= norm1.w; + + T n000 = dot(g000, Pf0); + T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); + T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); + T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); + T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); + T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); + T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); + T n111 = dot(g111, Pf1); + + vec<3, T, Q> fade_xyz = fade(Pf0); + vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); + vec<2, T, Q> n_yz = mix( + vec<2, T, Q>(n_z.x, n_z.y), + vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); + T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); + return T(2.2) * n_xyz; + } + */ + // Classic Perlin noise + template + GLM_FUNC_QUALIFIER T perlin(vec<4, T, Q> const& Position) + { + vec<4, T, Q> Pi0 = floor(Position); // Integer part for indexing + vec<4, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 + Pi0 = mod(Pi0, vec<4, T, Q>(289)); + Pi1 = mod(Pi1, vec<4, T, Q>(289)); + vec<4, T, Q> Pf0 = fract(Position); // Fractional part for interpolation + vec<4, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 + vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); + vec<4, T, Q> iy(Pi0.y, Pi0.y, Pi1.y, Pi1.y); + vec<4, T, Q> iz0(Pi0.z); + vec<4, T, Q> iz1(Pi1.z); + vec<4, T, Q> iw0(Pi0.w); + vec<4, T, Q> iw1(Pi1.w); + + vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); + vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); + vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); + vec<4, T, Q> ixy00 = detail::permute(ixy0 + iw0); + vec<4, T, Q> ixy01 = detail::permute(ixy0 + iw1); + vec<4, T, Q> ixy10 = detail::permute(ixy1 + iw0); + vec<4, T, Q> ixy11 = detail::permute(ixy1 + iw1); + + vec<4, T, Q> gx00 = ixy00 / T(7); + vec<4, T, Q> gy00 = floor(gx00) / T(7); + vec<4, T, Q> gz00 = floor(gy00) / T(6); + gx00 = fract(gx00) - T(0.5); + gy00 = fract(gy00) - T(0.5); + gz00 = fract(gz00) - T(0.5); + vec<4, T, Q> gw00 = vec<4, T, Q>(0.75) - abs(gx00) - abs(gy00) - abs(gz00); + vec<4, T, Q> sw00 = step(gw00, vec<4, T, Q>(0.0)); + gx00 -= sw00 * (step(T(0), gx00) - T(0.5)); + gy00 -= sw00 * (step(T(0), gy00) - T(0.5)); + + vec<4, T, Q> gx01 = ixy01 / T(7); + vec<4, T, Q> gy01 = floor(gx01) / T(7); + vec<4, T, Q> gz01 = floor(gy01) / T(6); + gx01 = fract(gx01) - T(0.5); + gy01 = fract(gy01) - T(0.5); + gz01 = fract(gz01) - T(0.5); + vec<4, T, Q> gw01 = vec<4, T, Q>(0.75) - abs(gx01) - abs(gy01) - abs(gz01); + vec<4, T, Q> sw01 = step(gw01, vec<4, T, Q>(0.0)); + gx01 -= sw01 * (step(T(0), gx01) - T(0.5)); + gy01 -= sw01 * (step(T(0), gy01) - T(0.5)); + + vec<4, T, Q> gx10 = ixy10 / T(7); + vec<4, T, Q> gy10 = floor(gx10) / T(7); + vec<4, T, Q> gz10 = floor(gy10) / T(6); + gx10 = fract(gx10) - T(0.5); + gy10 = fract(gy10) - T(0.5); + gz10 = fract(gz10) - T(0.5); + vec<4, T, Q> gw10 = vec<4, T, Q>(0.75) - abs(gx10) - abs(gy10) - abs(gz10); + vec<4, T, Q> sw10 = step(gw10, vec<4, T, Q>(0)); + gx10 -= sw10 * (step(T(0), gx10) - T(0.5)); + gy10 -= sw10 * 
(step(T(0), gy10) - T(0.5)); + + vec<4, T, Q> gx11 = ixy11 / T(7); + vec<4, T, Q> gy11 = floor(gx11) / T(7); + vec<4, T, Q> gz11 = floor(gy11) / T(6); + gx11 = fract(gx11) - T(0.5); + gy11 = fract(gy11) - T(0.5); + gz11 = fract(gz11) - T(0.5); + vec<4, T, Q> gw11 = vec<4, T, Q>(0.75) - abs(gx11) - abs(gy11) - abs(gz11); + vec<4, T, Q> sw11 = step(gw11, vec<4, T, Q>(0.0)); + gx11 -= sw11 * (step(T(0), gx11) - T(0.5)); + gy11 -= sw11 * (step(T(0), gy11) - T(0.5)); + + vec<4, T, Q> g0000(gx00.x, gy00.x, gz00.x, gw00.x); + vec<4, T, Q> g1000(gx00.y, gy00.y, gz00.y, gw00.y); + vec<4, T, Q> g0100(gx00.z, gy00.z, gz00.z, gw00.z); + vec<4, T, Q> g1100(gx00.w, gy00.w, gz00.w, gw00.w); + vec<4, T, Q> g0010(gx10.x, gy10.x, gz10.x, gw10.x); + vec<4, T, Q> g1010(gx10.y, gy10.y, gz10.y, gw10.y); + vec<4, T, Q> g0110(gx10.z, gy10.z, gz10.z, gw10.z); + vec<4, T, Q> g1110(gx10.w, gy10.w, gz10.w, gw10.w); + vec<4, T, Q> g0001(gx01.x, gy01.x, gz01.x, gw01.x); + vec<4, T, Q> g1001(gx01.y, gy01.y, gz01.y, gw01.y); + vec<4, T, Q> g0101(gx01.z, gy01.z, gz01.z, gw01.z); + vec<4, T, Q> g1101(gx01.w, gy01.w, gz01.w, gw01.w); + vec<4, T, Q> g0011(gx11.x, gy11.x, gz11.x, gw11.x); + vec<4, T, Q> g1011(gx11.y, gy11.y, gz11.y, gw11.y); + vec<4, T, Q> g0111(gx11.z, gy11.z, gz11.z, gw11.z); + vec<4, T, Q> g1111(gx11.w, gy11.w, gz11.w, gw11.w); + + vec<4, T, Q> norm00 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0000, g0000), dot(g0100, g0100), dot(g1000, g1000), dot(g1100, g1100))); + g0000 *= norm00.x; + g0100 *= norm00.y; + g1000 *= norm00.z; + g1100 *= norm00.w; + + vec<4, T, Q> norm01 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0001, g0001), dot(g0101, g0101), dot(g1001, g1001), dot(g1101, g1101))); + g0001 *= norm01.x; + g0101 *= norm01.y; + g1001 *= norm01.z; + g1101 *= norm01.w; + + vec<4, T, Q> norm10 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0010, g0010), dot(g0110, g0110), dot(g1010, g1010), dot(g1110, g1110))); + g0010 *= norm10.x; + g0110 *= norm10.y; + g1010 *= norm10.z; + g1110 *= norm10.w; + + vec<4, T, Q> norm11 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0011, g0011), dot(g0111, g0111), dot(g1011, g1011), dot(g1111, g1111))); + g0011 *= norm11.x; + g0111 *= norm11.y; + g1011 *= norm11.z; + g1111 *= norm11.w; + + T n0000 = dot(g0000, Pf0); + T n1000 = dot(g1000, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf0.w)); + T n0100 = dot(g0100, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf0.w)); + T n1100 = dot(g1100, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf0.w)); + T n0010 = dot(g0010, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf0.w)); + T n1010 = dot(g1010, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf0.w)); + T n0110 = dot(g0110, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf0.w)); + T n1110 = dot(g1110, vec<4, T, Q>(Pf1.x, Pf1.y, Pf1.z, Pf0.w)); + T n0001 = dot(g0001, vec<4, T, Q>(Pf0.x, Pf0.y, Pf0.z, Pf1.w)); + T n1001 = dot(g1001, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf1.w)); + T n0101 = dot(g0101, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf1.w)); + T n1101 = dot(g1101, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf1.w)); + T n0011 = dot(g0011, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf1.w)); + T n1011 = dot(g1011, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf1.w)); + T n0111 = dot(g0111, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf1.w)); + T n1111 = dot(g1111, Pf1); + + vec<4, T, Q> fade_xyzw = detail::fade(Pf0); + vec<4, T, Q> n_0w = mix(vec<4, T, Q>(n0000, n1000, n0100, n1100), vec<4, T, Q>(n0001, n1001, n0101, n1101), fade_xyzw.w); + vec<4, T, Q> n_1w = mix(vec<4, T, Q>(n0010, n1010, n0110, n1110), vec<4, T, Q>(n0011, n1011, n0111, n1111), fade_xyzw.w); + vec<4, T, Q> n_zw = mix(n_0w, n_1w, 
fade_xyzw.z); + vec<2, T, Q> n_yzw = mix(vec<2, T, Q>(n_zw.x, n_zw.y), vec<2, T, Q>(n_zw.z, n_zw.w), fade_xyzw.y); + T n_xyzw = mix(n_yzw.x, n_yzw.y, fade_xyzw.x); + return T(2.2) * n_xyzw; + } + + // Classic Perlin noise, periodic variant + template + GLM_FUNC_QUALIFIER T perlin(vec<2, T, Q> const& Position, vec<2, T, Q> const& rep) + { + vec<4, T, Q> Pi = floor(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) + vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); + vec<4, T, Q> Pf = fract(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) - vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); + Pi = mod(Pi, vec<4, T, Q>(rep.x, rep.y, rep.x, rep.y)); // To create noise with explicit period + Pi = mod(Pi, vec<4, T, Q>(289)); // To avoid truncation effects in permutation + vec<4, T, Q> ix(Pi.x, Pi.z, Pi.x, Pi.z); + vec<4, T, Q> iy(Pi.y, Pi.y, Pi.w, Pi.w); + vec<4, T, Q> fx(Pf.x, Pf.z, Pf.x, Pf.z); + vec<4, T, Q> fy(Pf.y, Pf.y, Pf.w, Pf.w); + + vec<4, T, Q> i = detail::permute(detail::permute(ix) + iy); + + vec<4, T, Q> gx = static_cast(2) * fract(i / T(41)) - T(1); + vec<4, T, Q> gy = abs(gx) - T(0.5); + vec<4, T, Q> tx = floor(gx + T(0.5)); + gx = gx - tx; + + vec<2, T, Q> g00(gx.x, gy.x); + vec<2, T, Q> g10(gx.y, gy.y); + vec<2, T, Q> g01(gx.z, gy.z); + vec<2, T, Q> g11(gx.w, gy.w); + + vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11))); + g00 *= norm.x; + g01 *= norm.y; + g10 *= norm.z; + g11 *= norm.w; + + T n00 = dot(g00, vec<2, T, Q>(fx.x, fy.x)); + T n10 = dot(g10, vec<2, T, Q>(fx.y, fy.y)); + T n01 = dot(g01, vec<2, T, Q>(fx.z, fy.z)); + T n11 = dot(g11, vec<2, T, Q>(fx.w, fy.w)); + + vec<2, T, Q> fade_xy = detail::fade(vec<2, T, Q>(Pf.x, Pf.y)); + vec<2, T, Q> n_x = mix(vec<2, T, Q>(n00, n01), vec<2, T, Q>(n10, n11), fade_xy.x); + T n_xy = mix(n_x.x, n_x.y, fade_xy.y); + return T(2.3) * n_xy; + } + + // Classic Perlin noise, periodic variant + template + GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& Position, vec<3, T, Q> const& rep) + { + vec<3, T, Q> Pi0 = mod(floor(Position), rep); // Integer part, modulo period + vec<3, T, Q> Pi1 = mod(Pi0 + vec<3, T, Q>(T(1)), rep); // Integer part + 1, mod period + Pi0 = mod(Pi0, vec<3, T, Q>(289)); + Pi1 = mod(Pi1, vec<3, T, Q>(289)); + vec<3, T, Q> Pf0 = fract(Position); // Fractional part for interpolation + vec<3, T, Q> Pf1 = Pf0 - vec<3, T, Q>(T(1)); // Fractional part - 1.0 + vec<4, T, Q> ix = vec<4, T, Q>(Pi0.x, Pi1.x, Pi0.x, Pi1.x); + vec<4, T, Q> iy = vec<4, T, Q>(Pi0.y, Pi0.y, Pi1.y, Pi1.y); + vec<4, T, Q> iz0(Pi0.z); + vec<4, T, Q> iz1(Pi1.z); + + vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); + vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); + vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); + + vec<4, T, Q> gx0 = ixy0 / T(7); + vec<4, T, Q> gy0 = fract(floor(gx0) / T(7)) - T(0.5); + gx0 = fract(gx0); + vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); + vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0)); + gx0 -= sz0 * (step(T(0), gx0) - T(0.5)); + gy0 -= sz0 * (step(T(0), gy0) - T(0.5)); + + vec<4, T, Q> gx1 = ixy1 / T(7); + vec<4, T, Q> gy1 = fract(floor(gx1) / T(7)) - T(0.5); + gx1 = fract(gx1); + vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); + vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(T(0))); + gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); + gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); + + vec<3, T, Q> g000 = vec<3, T, Q>(gx0.x, gy0.x, gz0.x); + vec<3, T, Q> g100 = vec<3, T, Q>(gx0.y, gy0.y, gz0.y); + vec<3, T, Q> g010 = vec<3, T, Q>(gx0.z, gy0.z, 
gz0.z); + vec<3, T, Q> g110 = vec<3, T, Q>(gx0.w, gy0.w, gz0.w); + vec<3, T, Q> g001 = vec<3, T, Q>(gx1.x, gy1.x, gz1.x); + vec<3, T, Q> g101 = vec<3, T, Q>(gx1.y, gy1.y, gz1.y); + vec<3, T, Q> g011 = vec<3, T, Q>(gx1.z, gy1.z, gz1.z); + vec<3, T, Q> g111 = vec<3, T, Q>(gx1.w, gy1.w, gz1.w); + + vec<4, T, Q> norm0 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); + g000 *= norm0.x; + g010 *= norm0.y; + g100 *= norm0.z; + g110 *= norm0.w; + vec<4, T, Q> norm1 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); + g001 *= norm1.x; + g011 *= norm1.y; + g101 *= norm1.z; + g111 *= norm1.w; + + T n000 = dot(g000, Pf0); + T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); + T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); + T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); + T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); + T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); + T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); + T n111 = dot(g111, Pf1); + + vec<3, T, Q> fade_xyz = detail::fade(Pf0); + vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); + vec<2, T, Q> n_yz = mix(vec<2, T, Q>(n_z.x, n_z.y), vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); + T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); + return T(2.2) * n_xyz; + } + + // Classic Perlin noise, periodic version + template + GLM_FUNC_QUALIFIER T perlin(vec<4, T, Q> const& Position, vec<4, T, Q> const& rep) + { + vec<4, T, Q> Pi0 = mod(floor(Position), rep); // Integer part modulo rep + vec<4, T, Q> Pi1 = mod(Pi0 + T(1), rep); // Integer part + 1 mod rep + vec<4, T, Q> Pf0 = fract(Position); // Fractional part for interpolation + vec<4, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 + vec<4, T, Q> ix = vec<4, T, Q>(Pi0.x, Pi1.x, Pi0.x, Pi1.x); + vec<4, T, Q> iy = vec<4, T, Q>(Pi0.y, Pi0.y, Pi1.y, Pi1.y); + vec<4, T, Q> iz0(Pi0.z); + vec<4, T, Q> iz1(Pi1.z); + vec<4, T, Q> iw0(Pi0.w); + vec<4, T, Q> iw1(Pi1.w); + + vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); + vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); + vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); + vec<4, T, Q> ixy00 = detail::permute(ixy0 + iw0); + vec<4, T, Q> ixy01 = detail::permute(ixy0 + iw1); + vec<4, T, Q> ixy10 = detail::permute(ixy1 + iw0); + vec<4, T, Q> ixy11 = detail::permute(ixy1 + iw1); + + vec<4, T, Q> gx00 = ixy00 / T(7); + vec<4, T, Q> gy00 = floor(gx00) / T(7); + vec<4, T, Q> gz00 = floor(gy00) / T(6); + gx00 = fract(gx00) - T(0.5); + gy00 = fract(gy00) - T(0.5); + gz00 = fract(gz00) - T(0.5); + vec<4, T, Q> gw00 = vec<4, T, Q>(0.75) - abs(gx00) - abs(gy00) - abs(gz00); + vec<4, T, Q> sw00 = step(gw00, vec<4, T, Q>(0)); + gx00 -= sw00 * (step(T(0), gx00) - T(0.5)); + gy00 -= sw00 * (step(T(0), gy00) - T(0.5)); + + vec<4, T, Q> gx01 = ixy01 / T(7); + vec<4, T, Q> gy01 = floor(gx01) / T(7); + vec<4, T, Q> gz01 = floor(gy01) / T(6); + gx01 = fract(gx01) - T(0.5); + gy01 = fract(gy01) - T(0.5); + gz01 = fract(gz01) - T(0.5); + vec<4, T, Q> gw01 = vec<4, T, Q>(0.75) - abs(gx01) - abs(gy01) - abs(gz01); + vec<4, T, Q> sw01 = step(gw01, vec<4, T, Q>(0.0)); + gx01 -= sw01 * (step(T(0), gx01) - T(0.5)); + gy01 -= sw01 * (step(T(0), gy01) - T(0.5)); + + vec<4, T, Q> gx10 = ixy10 / T(7); + vec<4, T, Q> gy10 = floor(gx10) / T(7); + vec<4, T, Q> gz10 = floor(gy10) / T(6); + gx10 = fract(gx10) - T(0.5); + gy10 = fract(gy10) - T(0.5); + gz10 = fract(gz10) - T(0.5); + 
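// Annotation (not in the upstream file): the gx/gy/gz lines above decode each
// hashed index into base-7, base-7 and base-6 digits via the successive
// divide/floor/fract steps, yielding the 7*7*6 = 294 candidate gradients the
// comments elsewhere refer to; the gw/sw lines that follow place each point on
// the |x| + |y| + |z| + |w| = 0.75 cross-polytope and fold any overflow back
// inside it. The same pattern repeats for each of the four ixy** hashes.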
vec<4, T, Q> gw10 = vec<4, T, Q>(0.75) - abs(gx10) - abs(gy10) - abs(gz10); + vec<4, T, Q> sw10 = step(gw10, vec<4, T, Q>(0.0)); + gx10 -= sw10 * (step(T(0), gx10) - T(0.5)); + gy10 -= sw10 * (step(T(0), gy10) - T(0.5)); + + vec<4, T, Q> gx11 = ixy11 / T(7); + vec<4, T, Q> gy11 = floor(gx11) / T(7); + vec<4, T, Q> gz11 = floor(gy11) / T(6); + gx11 = fract(gx11) - T(0.5); + gy11 = fract(gy11) - T(0.5); + gz11 = fract(gz11) - T(0.5); + vec<4, T, Q> gw11 = vec<4, T, Q>(0.75) - abs(gx11) - abs(gy11) - abs(gz11); + vec<4, T, Q> sw11 = step(gw11, vec<4, T, Q>(T(0))); + gx11 -= sw11 * (step(T(0), gx11) - T(0.5)); + gy11 -= sw11 * (step(T(0), gy11) - T(0.5)); + + vec<4, T, Q> g0000(gx00.x, gy00.x, gz00.x, gw00.x); + vec<4, T, Q> g1000(gx00.y, gy00.y, gz00.y, gw00.y); + vec<4, T, Q> g0100(gx00.z, gy00.z, gz00.z, gw00.z); + vec<4, T, Q> g1100(gx00.w, gy00.w, gz00.w, gw00.w); + vec<4, T, Q> g0010(gx10.x, gy10.x, gz10.x, gw10.x); + vec<4, T, Q> g1010(gx10.y, gy10.y, gz10.y, gw10.y); + vec<4, T, Q> g0110(gx10.z, gy10.z, gz10.z, gw10.z); + vec<4, T, Q> g1110(gx10.w, gy10.w, gz10.w, gw10.w); + vec<4, T, Q> g0001(gx01.x, gy01.x, gz01.x, gw01.x); + vec<4, T, Q> g1001(gx01.y, gy01.y, gz01.y, gw01.y); + vec<4, T, Q> g0101(gx01.z, gy01.z, gz01.z, gw01.z); + vec<4, T, Q> g1101(gx01.w, gy01.w, gz01.w, gw01.w); + vec<4, T, Q> g0011(gx11.x, gy11.x, gz11.x, gw11.x); + vec<4, T, Q> g1011(gx11.y, gy11.y, gz11.y, gw11.y); + vec<4, T, Q> g0111(gx11.z, gy11.z, gz11.z, gw11.z); + vec<4, T, Q> g1111(gx11.w, gy11.w, gz11.w, gw11.w); + + vec<4, T, Q> norm00 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0000, g0000), dot(g0100, g0100), dot(g1000, g1000), dot(g1100, g1100))); + g0000 *= norm00.x; + g0100 *= norm00.y; + g1000 *= norm00.z; + g1100 *= norm00.w; + + vec<4, T, Q> norm01 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0001, g0001), dot(g0101, g0101), dot(g1001, g1001), dot(g1101, g1101))); + g0001 *= norm01.x; + g0101 *= norm01.y; + g1001 *= norm01.z; + g1101 *= norm01.w; + + vec<4, T, Q> norm10 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0010, g0010), dot(g0110, g0110), dot(g1010, g1010), dot(g1110, g1110))); + g0010 *= norm10.x; + g0110 *= norm10.y; + g1010 *= norm10.z; + g1110 *= norm10.w; + + vec<4, T, Q> norm11 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0011, g0011), dot(g0111, g0111), dot(g1011, g1011), dot(g1111, g1111))); + g0011 *= norm11.x; + g0111 *= norm11.y; + g1011 *= norm11.z; + g1111 *= norm11.w; + + T n0000 = dot(g0000, Pf0); + T n1000 = dot(g1000, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf0.w)); + T n0100 = dot(g0100, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf0.w)); + T n1100 = dot(g1100, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf0.w)); + T n0010 = dot(g0010, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf0.w)); + T n1010 = dot(g1010, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf0.w)); + T n0110 = dot(g0110, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf0.w)); + T n1110 = dot(g1110, vec<4, T, Q>(Pf1.x, Pf1.y, Pf1.z, Pf0.w)); + T n0001 = dot(g0001, vec<4, T, Q>(Pf0.x, Pf0.y, Pf0.z, Pf1.w)); + T n1001 = dot(g1001, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf1.w)); + T n0101 = dot(g0101, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf1.w)); + T n1101 = dot(g1101, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf1.w)); + T n0011 = dot(g0011, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf1.w)); + T n1011 = dot(g1011, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf1.w)); + T n0111 = dot(g0111, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf1.w)); + T n1111 = dot(g1111, Pf1); + + vec<4, T, Q> fade_xyzw = detail::fade(Pf0); + vec<4, T, Q> n_0w = mix(vec<4, T, Q>(n0000, n1000, n0100, n1100), vec<4, T, Q>(n0001, n1001, n0101, 
n1101), fade_xyzw.w); + vec<4, T, Q> n_1w = mix(vec<4, T, Q>(n0010, n1010, n0110, n1110), vec<4, T, Q>(n0011, n1011, n0111, n1111), fade_xyzw.w); + vec<4, T, Q> n_zw = mix(n_0w, n_1w, fade_xyzw.z); + vec<2, T, Q> n_yzw = mix(vec<2, T, Q>(n_zw.x, n_zw.y), vec<2, T, Q>(n_zw.z, n_zw.w), fade_xyzw.y); + T n_xyzw = mix(n_yzw.x, n_yzw.y, fade_xyzw.x); + return T(2.2) * n_xyzw; + } + + template + GLM_FUNC_QUALIFIER T simplex(glm::vec<2, T, Q> const& v) + { + vec<4, T, Q> const C = vec<4, T, Q>( + T( 0.211324865405187), // (3.0 - sqrt(3.0)) / 6.0 + T( 0.366025403784439), // 0.5 * (sqrt(3.0) - 1.0) + T(-0.577350269189626), // -1.0 + 2.0 * C.x + T( 0.024390243902439)); // 1.0 / 41.0 + + // First corner + vec<2, T, Q> i = floor(v + dot(v, vec<2, T, Q>(C[1]))); + vec<2, T, Q> x0 = v - i + dot(i, vec<2, T, Q>(C[0])); + + // Other corners + //i1.x = step( x0.y, x0.x ); // x0.x > x0.y ? 1.0 : 0.0 + //i1.y = 1.0 - i1.x; + vec<2, T, Q> i1 = (x0.x > x0.y) ? vec<2, T, Q>(1, 0) : vec<2, T, Q>(0, 1); + // x0 = x0 - 0.0 + 0.0 * C.xx ; + // x1 = x0 - i1 + 1.0 * C.xx ; + // x2 = x0 - 1.0 + 2.0 * C.xx ; + vec<4, T, Q> x12 = vec<4, T, Q>(x0.x, x0.y, x0.x, x0.y) + vec<4, T, Q>(C.x, C.x, C.z, C.z); + x12 = vec<4, T, Q>(vec<2, T, Q>(x12) - i1, x12.z, x12.w); + + // Permutations + i = mod(i, vec<2, T, Q>(289)); // Avoid truncation effects in permutation + vec<3, T, Q> p = detail::permute( + detail::permute(i.y + vec<3, T, Q>(T(0), i1.y, T(1))) + + i.x + vec<3, T, Q>(T(0), i1.x, T(1))); + + vec<3, T, Q> m = max(vec<3, T, Q>(0.5) - vec<3, T, Q>( + dot(x0, x0), + dot(vec<2, T, Q>(x12.x, x12.y), vec<2, T, Q>(x12.x, x12.y)), + dot(vec<2, T, Q>(x12.z, x12.w), vec<2, T, Q>(x12.z, x12.w))), vec<3, T, Q>(0)); + m = m * m ; + m = m * m ; + + // Gradients: 41 points uniformly over a line, mapped onto a diamond. 
+ // The ring size 17*17 = 289 is close to a multiple of 41 (41*7 = 287) + + vec<3, T, Q> x = static_cast(2) * fract(p * C.w) - T(1); + vec<3, T, Q> h = abs(x) - T(0.5); + vec<3, T, Q> ox = floor(x + T(0.5)); + vec<3, T, Q> a0 = x - ox; + + // Normalise gradients implicitly by scaling m + // Inlined for speed: m *= taylorInvSqrt( a0*a0 + h*h ); + m *= static_cast(1.79284291400159) - T(0.85373472095314) * (a0 * a0 + h * h); + + // Compute final noise value at P + vec<3, T, Q> g; + g.x = a0.x * x0.x + h.x * x0.y; + //g.yz = a0.yz * x12.xz + h.yz * x12.yw; + g.y = a0.y * x12.x + h.y * x12.y; + g.z = a0.z * x12.z + h.z * x12.w; + return T(130) * dot(m, g); + } + + template + GLM_FUNC_QUALIFIER T simplex(vec<3, T, Q> const& v) + { + vec<2, T, Q> const C(1.0 / 6.0, 1.0 / 3.0); + vec<4, T, Q> const D(0.0, 0.5, 1.0, 2.0); + + // First corner + vec<3, T, Q> i(floor(v + dot(v, vec<3, T, Q>(C.y)))); + vec<3, T, Q> x0(v - i + dot(i, vec<3, T, Q>(C.x))); + + // Other corners + vec<3, T, Q> g(step(vec<3, T, Q>(x0.y, x0.z, x0.x), x0)); + vec<3, T, Q> l(T(1) - g); + vec<3, T, Q> i1(min(g, vec<3, T, Q>(l.z, l.x, l.y))); + vec<3, T, Q> i2(max(g, vec<3, T, Q>(l.z, l.x, l.y))); + + // x0 = x0 - 0.0 + 0.0 * C.xxx; + // x1 = x0 - i1 + 1.0 * C.xxx; + // x2 = x0 - i2 + 2.0 * C.xxx; + // x3 = x0 - 1.0 + 3.0 * C.xxx; + vec<3, T, Q> x1(x0 - i1 + C.x); + vec<3, T, Q> x2(x0 - i2 + C.y); // 2.0*C.x = 1/3 = C.y + vec<3, T, Q> x3(x0 - D.y); // -1.0+3.0*C.x = -0.5 = -D.y + + // Permutations + i = detail::mod289(i); + vec<4, T, Q> p(detail::permute(detail::permute(detail::permute( + i.z + vec<4, T, Q>(T(0), i1.z, i2.z, T(1))) + + i.y + vec<4, T, Q>(T(0), i1.y, i2.y, T(1))) + + i.x + vec<4, T, Q>(T(0), i1.x, i2.x, T(1)))); + + // Gradients: 7x7 points over a square, mapped onto an octahedron. 
+ // The ring size 17*17 = 289 is close to a multiple of 49 (49*6 = 294) + T n_ = static_cast(0.142857142857); // 1.0/7.0 + vec<3, T, Q> ns(n_ * vec<3, T, Q>(D.w, D.y, D.z) - vec<3, T, Q>(D.x, D.z, D.x)); + + vec<4, T, Q> j(p - T(49) * floor(p * ns.z * ns.z)); // mod(p,7*7) + + vec<4, T, Q> x_(floor(j * ns.z)); + vec<4, T, Q> y_(floor(j - T(7) * x_)); // mod(j,N) + + vec<4, T, Q> x(x_ * ns.x + ns.y); + vec<4, T, Q> y(y_ * ns.x + ns.y); + vec<4, T, Q> h(T(1) - abs(x) - abs(y)); + + vec<4, T, Q> b0(x.x, x.y, y.x, y.y); + vec<4, T, Q> b1(x.z, x.w, y.z, y.w); + + // vec4 s0 = vec4(lessThan(b0,0.0))*2.0 - 1.0; + // vec4 s1 = vec4(lessThan(b1,0.0))*2.0 - 1.0; + vec<4, T, Q> s0(floor(b0) * T(2) + T(1)); + vec<4, T, Q> s1(floor(b1) * T(2) + T(1)); + vec<4, T, Q> sh(-step(h, vec<4, T, Q>(0.0))); + + vec<4, T, Q> a0 = vec<4, T, Q>(b0.x, b0.z, b0.y, b0.w) + vec<4, T, Q>(s0.x, s0.z, s0.y, s0.w) * vec<4, T, Q>(sh.x, sh.x, sh.y, sh.y); + vec<4, T, Q> a1 = vec<4, T, Q>(b1.x, b1.z, b1.y, b1.w) + vec<4, T, Q>(s1.x, s1.z, s1.y, s1.w) * vec<4, T, Q>(sh.z, sh.z, sh.w, sh.w); + + vec<3, T, Q> p0(a0.x, a0.y, h.x); + vec<3, T, Q> p1(a0.z, a0.w, h.y); + vec<3, T, Q> p2(a1.x, a1.y, h.z); + vec<3, T, Q> p3(a1.z, a1.w, h.w); + + // Normalise gradients + vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3))); + p0 *= norm.x; + p1 *= norm.y; + p2 *= norm.z; + p3 *= norm.w; + + // Mix final noise value + vec<4, T, Q> m = max(T(0.6) - vec<4, T, Q>(dot(x0, x0), dot(x1, x1), dot(x2, x2), dot(x3, x3)), vec<4, T, Q>(0)); + m = m * m; + return T(42) * dot(m * m, vec<4, T, Q>(dot(p0, x0), dot(p1, x1), dot(p2, x2), dot(p3, x3))); + } + + template + GLM_FUNC_QUALIFIER T simplex(vec<4, T, Q> const& v) + { + vec<4, T, Q> const C( + 0.138196601125011, // (5 - sqrt(5))/20 G4 + 0.276393202250021, // 2 * G4 + 0.414589803375032, // 3 * G4 + -0.447213595499958); // -1 + 4 * G4 + + // (sqrt(5) - 1)/4 = F4, used once below + T const F4 = static_cast(0.309016994374947451); + + // First corner + vec<4, T, Q> i = floor(v + dot(v, vec<4, T, Q>(F4))); + vec<4, T, Q> x0 = v - i + dot(i, vec<4, T, Q>(C.x)); + + // Other corners + + // Rank sorting originally contributed by Bill Licea-Kane, AMD (formerly ATI) + vec<4, T, Q> i0; + vec<3, T, Q> isX = step(vec<3, T, Q>(x0.y, x0.z, x0.w), vec<3, T, Q>(x0.x)); + vec<3, T, Q> isYZ = step(vec<3, T, Q>(x0.z, x0.w, x0.w), vec<3, T, Q>(x0.y, x0.y, x0.z)); + // i0.x = dot(isX, vec3(1.0)); + //i0.x = isX.x + isX.y + isX.z; + //i0.yzw = static_cast(1) - isX; + i0 = vec<4, T, Q>(isX.x + isX.y + isX.z, T(1) - isX); + // i0.y += dot(isYZ.xy, vec2(1.0)); + i0.y += isYZ.x + isYZ.y; + //i0.zw += 1.0 - vec<2, T, Q>(isYZ.x, isYZ.y); + i0.z += static_cast(1) - isYZ.x; + i0.w += static_cast(1) - isYZ.y; + i0.z += isYZ.z; + i0.w += static_cast(1) - isYZ.z; + + // i0 now contains the unique values 0,1,2,3 in each channel + vec<4, T, Q> i3 = clamp(i0, T(0), T(1)); + vec<4, T, Q> i2 = clamp(i0 - T(1), T(0), T(1)); + vec<4, T, Q> i1 = clamp(i0 - T(2), T(0), T(1)); + + // x0 = x0 - 0.0 + 0.0 * C.xxxx + // x1 = x0 - i1 + 0.0 * C.xxxx + // x2 = x0 - i2 + 0.0 * C.xxxx + // x3 = x0 - i3 + 0.0 * C.xxxx + // x4 = x0 - 1.0 + 4.0 * C.xxxx + vec<4, T, Q> x1 = x0 - i1 + C.x; + vec<4, T, Q> x2 = x0 - i2 + C.y; + vec<4, T, Q> x3 = x0 - i3 + C.z; + vec<4, T, Q> x4 = x0 + C.w; + + // Permutations + i = mod(i, vec<4, T, Q>(289)); + T j0 = detail::permute(detail::permute(detail::permute(detail::permute(i.w) + i.z) + i.y) + i.x); + vec<4, T, Q> j1 = 
detail::permute(detail::permute(detail::permute(detail::permute(
+ i.w + vec<4, T, Q>(i1.w, i2.w, i3.w, T(1)))
+ + i.z + vec<4, T, Q>(i1.z, i2.z, i3.z, T(1)))
+ + i.y + vec<4, T, Q>(i1.y, i2.y, i3.y, T(1)))
+ + i.x + vec<4, T, Q>(i1.x, i2.x, i3.x, T(1)));
+
+ // Gradients: 7x7x6 points over a cube, mapped onto a 4-cross polytope
+ // 7*7*6 = 294, which is close to the ring size 17*17 = 289.
+ vec<4, T, Q> ip = vec<4, T, Q>(T(1) / T(294), T(1) / T(49), T(1) / T(7), T(0));
+
+ vec<4, T, Q> p0 = detail::grad4(j0, ip);
+ vec<4, T, Q> p1 = detail::grad4(j1.x, ip);
+ vec<4, T, Q> p2 = detail::grad4(j1.y, ip);
+ vec<4, T, Q> p3 = detail::grad4(j1.z, ip);
+ vec<4, T, Q> p4 = detail::grad4(j1.w, ip);
+
+ // Normalise gradients
+ vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3)));
+ p0 *= norm.x;
+ p1 *= norm.y;
+ p2 *= norm.z;
+ p3 *= norm.w;
+ p4 *= detail::taylorInvSqrt(dot(p4, p4));
+
+ // Mix contributions from the five corners
+ vec<3, T, Q> m0 = max(T(0.6) - vec<3, T, Q>(dot(x0, x0), dot(x1, x1), dot(x2, x2)), vec<3, T, Q>(0));
+ vec<2, T, Q> m1 = max(T(0.6) - vec<2, T, Q>(dot(x3, x3), dot(x4, x4)), vec<2, T, Q>(0));
+ m0 = m0 * m0;
+ m1 = m1 * m1;
+ return T(49) *
+ (dot(m0 * m0, vec<3, T, Q>(dot(p0, x0), dot(p1, x1), dot(p2, x2))) +
+ dot(m1 * m1, vec<2, T, Q>(dot(p3, x3), dot(p4, x4))));
+ }
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/packing.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/packing.hpp
new file mode 100644
index 000000000000..8e416b3fe1b5
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/packing.hpp
@@ -0,0 +1,728 @@
+/// @ref gtc_packing
+/// @file glm/gtc/packing.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_packing GLM_GTC_packing
+/// @ingroup gtc
+///
+/// Include to use the features of this extension.
+///
+/// This extension provides a set of functions to convert vectors to packed
+/// formats.

+#pragma once
+
+// Dependency:
+#include "type_precision.hpp"
+#include "../ext/vector_packing.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# pragma message("GLM: GLM_GTC_packing extension included")
+#endif
+
+namespace glm
+{
+ /// @addtogroup gtc_packing
+ /// @{
+
+ /// First, converts the normalized floating-point value v into an 8-bit integer value.
+ /// Then, the results are packed into the returned 8-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm1x8: round(clamp(c, 0, +1) * 255.0)
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packUnorm2x8(vec2 const& v)
+ /// @see uint32 packUnorm4x8(vec4 const& v)
+ /// @see GLSL packUnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL uint8 packUnorm1x8(float v);
+
+ /// Convert a single 8-bit integer to a normalized floating-point value.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackUnorm1x8: f / 255.0
+ ///
+ /// @see gtc_packing
+ /// @see vec2 unpackUnorm2x8(uint16 p)
+ /// @see vec4 unpackUnorm4x8(uint32 p)
+ /// @see GLSL unpackUnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL float unpackUnorm1x8(uint8 p);
+
+ /// First, converts each component of the normalized floating-point value v into 8-bit integer values.
+ /// Then, the results are packed into the returned 16-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm2x8: round(clamp(c, 0, +1) * 255.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint8 packUnorm1x8(float const& v)
+ /// @see uint32 packUnorm4x8(vec4 const& v)
+ /// @see GLSL packUnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL uint16 packUnorm2x8(vec2 const& v);
+
+ /// First, unpacks a single 16-bit unsigned integer p into a pair of 8-bit unsigned integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned two-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackUnorm2x8: f / 255.0
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see float unpackUnorm1x8(uint8 v)
+ /// @see vec4 unpackUnorm4x8(uint32 p)
+ /// @see GLSL unpackUnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL vec2 unpackUnorm2x8(uint16 p);
+
+ /// First, converts the normalized floating-point value v into an 8-bit integer value.
+ /// Then, the results are packed into the returned 8-bit unsigned integer.
+ ///
+ /// The conversion to fixed point is done as follows:
+ /// packSnorm1x8: round(clamp(s, -1, +1) * 127.0)
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packSnorm2x8(vec2 const& v)
+ /// @see uint32 packSnorm4x8(vec4 const& v)
+ /// @see GLSL packSnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL uint8 packSnorm1x8(float s);
+
+ /// First, unpacks a single 8-bit unsigned integer p into a single 8-bit signed integer.
+ /// Then, the value is converted to a normalized floating-point value to generate the returned scalar.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm1x8: clamp(f / 127.0, -1, +1)
+ ///
+ /// @see gtc_packing
+ /// @see vec2 unpackSnorm2x8(uint16 p)
+ /// @see vec4 unpackSnorm4x8(uint32 p)
+ /// @see GLSL unpackSnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL float unpackSnorm1x8(uint8 p);
+
+ /// First, converts each component of the normalized floating-point value v into 8-bit integer values.
+ /// Then, the results are packed into the returned 16-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packSnorm2x8: round(clamp(c, -1, +1) * 127.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint8 packSnorm1x8(float const& v)
+ /// @see uint32 packSnorm4x8(vec4 const& v)
+ /// @see GLSL packSnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL uint16 packSnorm2x8(vec2 const& v);
+
+ /// First, unpacks a single 16-bit unsigned integer p into a pair of 8-bit signed integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned two-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm2x8: clamp(f / 127.0, -1, +1)
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see float unpackSnorm1x8(uint8 p)
+ /// @see vec4 unpackSnorm4x8(uint32 p)
+ /// @see GLSL unpackSnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL vec2 unpackSnorm2x8(uint16 p);
+
+ /// First, converts the normalized floating-point value v into a 16-bit integer value.
+ /// Then, the results are packed into the returned 16-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm1x16: round(clamp(c, 0, +1) * 65535.0)
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packSnorm1x16(float const& v)
+ /// @see uint64 packSnorm4x16(vec4 const& v)
+ /// @see GLSL packUnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL uint16 packUnorm1x16(float v);
+
+ /// First, unpacks a single 16-bit unsigned integer p into a single 16-bit unsigned integer.
+ /// Then, the value is converted to a normalized floating-point value to generate the returned scalar.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackUnorm1x16: f / 65535.0
+ ///
+ /// @see gtc_packing
+ /// @see vec2 unpackUnorm2x16(uint32 p)
+ /// @see vec4 unpackUnorm4x16(uint64 p)
+ /// @see GLSL unpackUnorm2x16 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL float unpackUnorm1x16(uint16 p);
+
+ /// First, converts each component of the normalized floating-point value v into 16-bit integer values.
+ /// Then, the results are packed into the returned 64-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packUnorm4x16: round(clamp(c, 0, +1) * 65535.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packUnorm1x16(float const& v)
+ /// @see uint32 packUnorm2x16(vec2 const& v)
+ /// @see GLSL packUnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL uint64 packUnorm4x16(vec4 const& v);
+
+ /// First, unpacks a single 64-bit unsigned integer p into four 16-bit unsigned integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackUnorm4x16: f / 65535.0
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see float unpackUnorm1x16(uint16 p)
+ /// @see vec2 unpackUnorm2x16(uint32 p)
+ /// @see GLSL unpackUnorm2x16 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL vec4 unpackUnorm4x16(uint64 p);
+
+ /// First, converts the normalized floating-point value v into a 16-bit integer value.
+ /// Then, the results are packed into the returned 16-bit unsigned integer.
+ ///
+ /// The conversion to fixed point is done as follows:
+ /// packSnorm1x16: round(clamp(s, -1, +1) * 32767.0)
+ ///
+ /// @see gtc_packing
+ /// @see uint32 packSnorm2x16(vec2 const& v)
+ /// @see uint64 packSnorm4x16(vec4 const& v)
+ /// @see GLSL packSnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL uint16 packSnorm1x16(float v);
+
+ /// First, unpacks a single 16-bit unsigned integer p into a single 16-bit signed integer.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned scalar.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm1x16: clamp(f / 32767.0, -1, +1)
+ ///
+ /// @see gtc_packing
+ /// @see vec2 unpackSnorm2x16(uint32 p)
+ /// @see vec4 unpackSnorm4x16(uint64 p)
+ /// @see GLSL unpackSnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL float unpackSnorm1x16(uint16 p);
+
+ /// First, converts each component of the normalized floating-point value v into 16-bit integer values.
+ /// Then, the results are packed into the returned 64-bit unsigned integer.
+ ///
+ /// The conversion for component c of v to fixed point is done as follows:
+ /// packSnorm4x16: round(clamp(c, -1, +1) * 32767.0)
+ ///
+ /// The first component of the vector will be written to the least significant bits of the output;
+ /// the last component will be written to the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see uint16 packSnorm1x16(float const& v)
+ /// @see uint32 packSnorm2x16(vec2 const& v)
+ /// @see GLSL packSnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL uint64 packSnorm4x16(vec4 const& v);
+
+ /// First, unpacks a single 64-bit unsigned integer p into four 16-bit signed integers.
+ /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector.
+ ///
+ /// The conversion for unpacked fixed-point value f to floating point is done as follows:
+ /// unpackSnorm4x16: clamp(f / 32767.0, -1, +1)
+ ///
+ /// The first component of the returned vector will be extracted from the least significant bits of the input;
+ /// the last component will be extracted from the most significant bits.
+ ///
+ /// @see gtc_packing
+ /// @see float unpackSnorm1x16(uint16 p)
+ /// @see vec2 unpackSnorm2x16(uint32 p)
+ /// @see GLSL unpackSnorm4x8 man page
+ /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+ GLM_FUNC_DECL vec4 unpackSnorm4x16(uint64 p);
+
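As a concrete illustration of the fixed-point contracts documented above, a minimal round-trip sketch (not part of the imported sources; it uses only the functions declared in this header):

    #include <glm/glm.hpp>
    #include <glm/gtc/packing.hpp>

    void packingRoundTrip() {
        // packUnorm2x8 stores round(clamp(c, 0, 1) * 255.0) per component,
        // with v.x landing in the least significant byte.
        glm::uint16 p = glm::packUnorm2x8(glm::vec2(0.25f, 1.0f));
        glm::vec2 u = glm::unpackUnorm2x8(p); // each byte decodes as f / 255.0
        // u matches the input to within 1/255 per component.

        // Signed 16-bit variant: round(clamp(c, -1, 1) * 32767.0) per lane.
        glm::uint64 q = glm::packSnorm4x16(glm::vec4(-1.0f, -0.5f, 0.5f, 1.0f));
        glm::vec4 s = glm::unpackSnorm4x16(q); // clamp(f / 32767.0, -1, +1)
    }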
+ /// + /// @see gtc_packing + /// @see uint32 packHalf2x16(vec2 const& v) + /// @see uint64 packHalf4x16(vec4 const& v) + /// @see GLSL packHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint16 packHalf1x16(float v); + + /// Returns a floating-point scalar with components obtained by unpacking a 16-bit unsigned integer into a 16-bit value, + /// interpreted as a 16-bit floating-point number according to the OpenGL Specification, + /// and converting it to 32-bit floating-point values. + /// + /// @see gtc_packing + /// @see vec2 unpackHalf2x16(uint32 const& v) + /// @see vec4 unpackHalf4x16(uint64 const& v) + /// @see GLSL unpackHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL float unpackHalf1x16(uint16 v); + + /// Returns an unsigned integer obtained by converting the components of a four-component floating-point vector + /// to the 16-bit floating-point representation found in the OpenGL Specification, + /// and then packing these four 16-bit values into a 64-bit unsigned integer. + /// The first vector component specifies the 16 least-significant bits of the result; + /// the forth component specifies the 16 most-significant bits. + /// + /// @see gtc_packing + /// @see uint16 packHalf1x16(float const& v) + /// @see uint32 packHalf2x16(vec2 const& v) + /// @see GLSL packHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint64 packHalf4x16(vec4 const& v); + + /// Returns a four-component floating-point vector with components obtained by unpacking a 64-bit unsigned integer into four 16-bit values, + /// interpreting those values as 16-bit floating-point numbers according to the OpenGL Specification, + /// and converting them to 32-bit floating-point values. + /// The first component of the vector is obtained from the 16 least-significant bits of v; + /// the forth component is obtained from the 16 most-significant bits of v. + /// + /// @see gtc_packing + /// @see float unpackHalf1x16(uint16 const& v) + /// @see vec2 unpackHalf2x16(uint32 const& v) + /// @see GLSL unpackHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec4 unpackHalf4x16(uint64 p); + + /// Returns an unsigned integer obtained by converting the components of a four-component signed integer vector + /// to the 10-10-10-2-bit signed integer representation found in the OpenGL Specification, + /// and then packing these four values into a 32-bit unsigned integer. + /// The first vector component specifies the 10 least-significant bits of the result; + /// the forth component specifies the 2 most-significant bits. + /// + /// @see gtc_packing + /// @see uint32 packI3x10_1x2(uvec4 const& v) + /// @see uint32 packSnorm3x10_1x2(vec4 const& v) + /// @see uint32 packUnorm3x10_1x2(vec4 const& v) + /// @see ivec4 unpackI3x10_1x2(uint32 const& p) + GLM_FUNC_DECL uint32 packI3x10_1x2(ivec4 const& v); + + /// Unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit signed integers. + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. 
+	///
+	/// @see gtc_packing
+	/// @see uint32 packU3x10_1x2(uvec4 const& v)
+	/// @see vec4 unpackSnorm3x10_1x2(uint32 const& p)
+	/// @see uvec4 unpackU3x10_1x2(uint32 const& p)
+	GLM_FUNC_DECL ivec4 unpackI3x10_1x2(uint32 p);
+
+	/// Returns an unsigned integer obtained by converting the components of a four-component unsigned integer vector
+	/// to the 10-10-10-2-bit unsigned integer representation found in the OpenGL Specification,
+	/// and then packing these four values into a 32-bit unsigned integer.
+	/// The first vector component specifies the 10 least-significant bits of the result;
+	/// the fourth component specifies the 2 most-significant bits.
+	///
+	/// @see gtc_packing
+	/// @see uint32 packI3x10_1x2(ivec4 const& v)
+	/// @see uint32 packSnorm3x10_1x2(vec4 const& v)
+	/// @see uint32 packUnorm3x10_1x2(vec4 const& v)
+	/// @see uvec4 unpackU3x10_1x2(uint32 const& p)
+	GLM_FUNC_DECL uint32 packU3x10_1x2(uvec4 const& v);
+
+	/// Unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit unsigned integers.
+	///
+	/// The first component of the returned vector will be extracted from the least significant bits of the input;
+	/// the last component will be extracted from the most significant bits.
+	///
+	/// @see gtc_packing
+	/// @see uint32 packU3x10_1x2(uvec4 const& v)
+	/// @see vec4 unpackSnorm3x10_1x2(uint32 const& p)
+	/// @see ivec4 unpackI3x10_1x2(uint32 const& p)
+	GLM_FUNC_DECL uvec4 unpackU3x10_1x2(uint32 p);
+
+	/// First, converts the first three components of the normalized floating-point value v into 10-bit signed integer values.
+	/// Then, converts the fourth component of the normalized floating-point value v into a 2-bit signed integer value.
+	/// Then, the results are packed into the returned 32-bit unsigned integer.
+	///
+	/// The conversion for component c of v to fixed point is done as follows:
+	/// packSnorm3x10_1x2(xyz): round(clamp(c, -1, +1) * 511.0)
+	/// packSnorm3x10_1x2(w): round(clamp(c, -1, +1) * 1.0)
+	///
+	/// The first vector component specifies the 10 least-significant bits of the result;
+	/// the fourth component specifies the 2 most-significant bits.
+	///
+	/// @see gtc_packing
+	/// @see vec4 unpackSnorm3x10_1x2(uint32 const& p)
+	/// @see uint32 packUnorm3x10_1x2(vec4 const& v)
+	/// @see uint32 packU3x10_1x2(uvec4 const& v)
+	/// @see uint32 packI3x10_1x2(ivec4 const& v)
+	GLM_FUNC_DECL uint32 packSnorm3x10_1x2(vec4 const& v);
+
+	/// First, unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit signed integers.
+	/// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector.
+	///
+	/// The conversion for unpacked fixed-point value f to floating point is done as follows:
+	/// unpackSnorm3x10_1x2(xyz): clamp(f / 511.0, -1, +1)
+	/// unpackSnorm3x10_1x2(w): clamp(f / 1.0, -1, +1)
+	///
+	/// The first component of the returned vector will be extracted from the least significant bits of the input;
+	/// the last component will be extracted from the most significant bits.
+	///
+	/// @see gtc_packing
+	/// @see uint32 packSnorm3x10_1x2(vec4 const& v)
+	/// @see vec4 unpackUnorm3x10_1x2(uint32 const& p)
+	/// @see ivec4 unpackI3x10_1x2(uint32 const& p)
+	/// @see uvec4 unpackU3x10_1x2(uint32 const& p)
+	GLM_FUNC_DECL vec4 unpackSnorm3x10_1x2(uint32 p);
+
+	/// First, converts the first three components of the normalized floating-point value v into 10-bit unsigned integer values.
+	/// Then, converts the fourth component of the normalized floating-point value v into a 2-bit unsigned integer value.
+	/// Then, the results are packed into the returned 32-bit unsigned integer.
+	///
+	/// The conversion for component c of v to fixed point is done as follows:
+	/// packUnorm3x10_1x2(xyz): round(clamp(c, 0, +1) * 1023.0)
+	/// packUnorm3x10_1x2(w): round(clamp(c, 0, +1) * 3.0)
+	///
+	/// The first vector component specifies the 10 least-significant bits of the result;
+	/// the fourth component specifies the 2 most-significant bits.
+	///
+	/// @see gtc_packing
+	/// @see vec4 unpackUnorm3x10_1x2(uint32 const& p)
+	/// @see uint32 packSnorm3x10_1x2(vec4 const& v)
+	/// @see uint32 packU3x10_1x2(uvec4 const& v)
+	/// @see uint32 packI3x10_1x2(ivec4 const& v)
+	GLM_FUNC_DECL uint32 packUnorm3x10_1x2(vec4 const& v);
+
+	/// First, unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit unsigned integers.
+	/// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector.
+	///
+	/// The conversion for unpacked fixed-point value f to floating point is done as follows:
+	/// unpackUnorm3x10_1x2(xyz): clamp(f / 1023.0, 0, +1)
+	/// unpackUnorm3x10_1x2(w): clamp(f / 3.0, 0, +1)
+	///
+	/// The first component of the returned vector will be extracted from the least significant bits of the input;
+	/// the last component will be extracted from the most significant bits.
+	///
+	/// @see gtc_packing
+	/// @see uint32 packUnorm3x10_1x2(vec4 const& v)
+	/// @see vec4 unpackSnorm3x10_1x2(uint32 const& p)
+	/// @see ivec4 unpackI3x10_1x2(uint32 const& p)
+	/// @see uvec4 unpackU3x10_1x2(uint32 const& p)
+	GLM_FUNC_DECL vec4 unpackUnorm3x10_1x2(uint32 p);
+
+	/// First, converts the first two components of the normalized floating-point value v into 11-bit signless floating-point values.
+	/// Then, converts the third component of the normalized floating-point value v into a 10-bit signless floating-point value.
+	/// Then, the results are packed into the returned 32-bit unsigned integer.
+	///
+	/// The first vector component specifies the 11 least-significant bits of the result;
+	/// the last component specifies the 10 most-significant bits.
+	///
+	/// @see gtc_packing
+	/// @see vec3 unpackF2x11_1x10(uint32 const& p)
+	GLM_FUNC_DECL uint32 packF2x11_1x10(vec3 const& v);
+
+	/// First, unpacks a single 32-bit unsigned integer p into two 11-bit signless floating-point values and one 10-bit signless floating-point value.
+	/// Then, each component is converted to a 32-bit floating-point value to generate the returned three-component vector.
+	///
+	/// The first component of the returned vector will be extracted from the least significant bits of the input;
+	/// the last component will be extracted from the most significant bits.
+	///
+	/// @see gtc_packing
+	/// @see uint32 packF2x11_1x10(vec3 const& v)
+	GLM_FUNC_DECL vec3 unpackF2x11_1x10(uint32 p);
+
+	/// First, converts the three components of the normalized floating-point value v into 9-bit mantissa values sharing a single 5-bit exponent.
+	/// Then, the results are packed into the returned 32-bit unsigned integer.
+	///
+	/// The first vector component is stored in the least-significant bits of the result;
+	/// the shared exponent occupies the 5 most-significant bits.
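+	///
+	/// A sketch of encoding an HDR color (the literal is an arbitrary example):
+	/// @code
+	/// glm::uint32 const rgb9e5 = glm::packF3x9_E1x5(glm::vec3(0.25f, 0.5f, 4.0f));
+	/// glm::vec3 const hdr = glm::unpackF3x9_E1x5(rgb9e5); // approximately the input
+	/// @endcode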
+	///
+	/// packF3x9_E1x5 allows encoding into RGBE / RGB9E5 format
+	///
+	/// @see gtc_packing
+	/// @see vec3 unpackF3x9_E1x5(uint32 const& p)
+	GLM_FUNC_DECL uint32 packF3x9_E1x5(vec3 const& v);
+
+	/// First, unpacks a single 32-bit unsigned integer p into three 9-bit mantissa values and one shared 5-bit exponent.
+	/// Then, each component is converted to a 32-bit floating-point value to generate the returned three-component vector.
+	///
+	/// The first component of the returned vector will be extracted from the least significant bits of the input;
+	/// the shared exponent will be extracted from the most significant bits.
+	///
+	/// unpackF3x9_E1x5 allows decoding RGBE / RGB9E5 data
+	///
+	/// @see gtc_packing
+	/// @see uint32 packF3x9_E1x5(vec3 const& v)
+	GLM_FUNC_DECL vec3 unpackF3x9_E1x5(uint32 p);
+
+	/// Returns a four-component vector encoding an RGB color into the RGBM format:
+	/// the RGB components are divided by a shared multiplier that is stored in the returned w component.
+	///
+	/// @see gtc_packing
+	/// @see vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& p)
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<4, T, Q> packRGBM(vec<3, T, Q> const& rgb);
+
+	/// Returns a three-component RGB color decoded from an RGBM-encoded vector
+	/// by multiplying the RGB components with the shared multiplier stored in the w component.
+	///
+	/// @see gtc_packing
+	/// @see vec<4, T, Q> packRGBM(vec<3, T, Q> const& v)
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& rgbm);
+
+	/// Returns an unsigned integer vector obtained by converting the components of a floating-point vector
+	/// to the 16-bit floating-point representation found in the OpenGL Specification.
+	/// Each vector component is packed into the corresponding 16-bit unsigned integer of the returned vector.
+	///
+	/// @see gtc_packing
+	/// @see vec<L, float, Q> unpackHalf(vec<L, uint16, Q> const& p)
+	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL vec<L, uint16, Q> packHalf(vec<L, float, Q> const& v);
+
+	/// Returns a floating-point vector with components obtained by reinterpreting an integer vector as 16-bit floating-point numbers
+	/// and converting them to 32-bit floating-point values.
+	///
+	/// @see gtc_packing
+	/// @see vec<L, uint16, Q> packHalf(vec<L, float, Q> const& v)
+	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL vec<L, float, Q> unpackHalf(vec<L, uint16, Q> const& p);
+
+	/// Convert each component of the normalized floating-point vector into unsigned integer values.
+	///
+	/// @see gtc_packing
+	/// @see vec<L, floatType, Q> unpackUnorm(vec<L, uintType, Q> const& p)
+	template<typename uintType, length_t L, typename floatType, qualifier Q>
+	GLM_FUNC_DECL vec<L, uintType, Q> packUnorm(vec<L, floatType, Q> const& v);
+
+	/// Convert a packed integer to a normalized floating-point vector.
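+	///
+	/// A sketch of the generic round trip through 8-bit storage (values are arbitrary examples):
+	/// @code
+	/// glm::u8vec3 const Stored = glm::packUnorm<glm::uint8>(glm::vec3(0.0f, 0.5f, 1.0f));
+	/// glm::vec3 const Restored = glm::unpackUnorm<float>(Stored); // ~(0.0, 0.5, 1.0)
+	/// @endcode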
+	///
+	/// @see gtc_packing
+	/// @see vec<L, uintType, Q> packUnorm(vec<L, floatType, Q> const& v)
+	template<typename floatType, length_t L, typename uintType, qualifier Q>
+	GLM_FUNC_DECL vec<L, floatType, Q> unpackUnorm(vec<L, uintType, Q> const& v);
+
+	/// Convert each component of the normalized floating-point vector into signed integer values.
+	///
+	/// @see gtc_packing
+	/// @see vec<L, floatType, Q> unpackSnorm(vec<L, intType, Q> const& p)
+	template<typename intType, length_t L, typename floatType, qualifier Q>
+	GLM_FUNC_DECL vec<L, intType, Q> packSnorm(vec<L, floatType, Q> const& v);
+
+	/// Convert a packed integer to a normalized floating-point vector.
+	///
+	/// @see gtc_packing
+	/// @see vec<L, intType, Q> packSnorm(vec<L, floatType, Q> const& v)
+	template<typename floatType, length_t L, typename intType, qualifier Q>
+	GLM_FUNC_DECL vec<L, floatType, Q> unpackSnorm(vec<L, intType, Q> const& v);
+
+	/// Convert each component of the normalized floating-point vector into unsigned integer values.
+	///
+	/// @see gtc_packing
+	/// @see vec2 unpackUnorm2x4(uint8 p)
+	GLM_FUNC_DECL uint8 packUnorm2x4(vec2 const& v);
+
+	/// Convert a packed integer to a normalized floating-point vector.
+	///
+	/// @see gtc_packing
+	/// @see uint8 packUnorm2x4(vec2 const& v)
+	GLM_FUNC_DECL vec2 unpackUnorm2x4(uint8 p);
+
+	/// Convert each component of the normalized floating-point vector into unsigned integer values.
+	///
+	/// @see gtc_packing
+	/// @see vec4 unpackUnorm4x4(uint16 p)
+	GLM_FUNC_DECL uint16 packUnorm4x4(vec4 const& v);
+
+	/// Convert a packed integer to a normalized floating-point vector.
+	///
+	/// @see gtc_packing
+	/// @see uint16 packUnorm4x4(vec4 const& v)
+	GLM_FUNC_DECL vec4 unpackUnorm4x4(uint16 p);
+
+	/// Convert each component of the normalized floating-point vector into unsigned integer values.
+	///
+	/// @see gtc_packing
+	/// @see vec3 unpackUnorm1x5_1x6_1x5(uint16 p)
+	GLM_FUNC_DECL uint16 packUnorm1x5_1x6_1x5(vec3 const& v);
+
+	/// Convert a packed integer to a normalized floating-point vector.
+	///
+	/// @see gtc_packing
+	/// @see uint16 packUnorm1x5_1x6_1x5(vec3 const& v)
+	GLM_FUNC_DECL vec3 unpackUnorm1x5_1x6_1x5(uint16 p);
+
+	/// Convert each component of the normalized floating-point vector into unsigned integer values.
+	///
+	/// @see gtc_packing
+	/// @see vec4 unpackUnorm3x5_1x1(uint16 p)
+	GLM_FUNC_DECL uint16 packUnorm3x5_1x1(vec4 const& v);
+
+	/// Convert a packed integer to a normalized floating-point vector.
+	///
+	/// @see gtc_packing
+	/// @see uint16 packUnorm3x5_1x1(vec4 const& v)
+	GLM_FUNC_DECL vec4 unpackUnorm3x5_1x1(uint16 p);
+
+	/// Convert each component of the normalized floating-point vector into unsigned integer values.
+	///
+	/// @see gtc_packing
+	/// @see vec3 unpackUnorm2x3_1x2(uint8 p)
+	GLM_FUNC_DECL uint8 packUnorm2x3_1x2(vec3 const& v);
+
+	/// Convert a packed integer to a normalized floating-point vector.
+	///
+	/// @see gtc_packing
+	/// @see uint8 packUnorm2x3_1x2(vec3 const& v)
+	GLM_FUNC_DECL vec3 unpackUnorm2x3_1x2(uint8 p);
+
+	/// Convert each component from an integer vector into a packed integer.
+	///
+	/// @see gtc_packing
+	/// @see i8vec2 unpackInt2x8(int16 p)
+	GLM_FUNC_DECL int16 packInt2x8(i8vec2 const& v);
+
+	/// Convert a packed integer into an integer vector.
+	///
+	/// @see gtc_packing
+	/// @see int16 packInt2x8(i8vec2 const& v)
+	GLM_FUNC_DECL i8vec2 unpackInt2x8(int16 p);
+
+	/// Convert each component from an integer vector into a packed unsigned integer.
+	///
+	/// @see gtc_packing
+	/// @see u8vec2 unpackUint2x8(uint16 p)
+	GLM_FUNC_DECL uint16 packUint2x8(u8vec2 const& v);
+
+	/// Convert a packed integer into an integer vector.
+	///
+	/// @see gtc_packing
+	/// @see uint16 packUint2x8(u8vec2 const& v)
+	GLM_FUNC_DECL u8vec2 unpackUint2x8(uint16 p);
+
+	/// Convert each component from an integer vector into a packed integer.
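+	///
+	/// These integer packs preserve the exact bit patterns (memcpy semantics); a round-trip sketch:
+	/// @code
+	/// glm::int32 const p = glm::packInt4x8(glm::i8vec4(-128, -1, 0, 127));
+	/// glm::i8vec4 const v = glm::unpackInt4x8(p); // exact round trip
+	/// @endcode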
+	///
+	/// @see gtc_packing
+	/// @see i8vec4 unpackInt4x8(int32 p)
+	GLM_FUNC_DECL int32 packInt4x8(i8vec4 const& v);
+
+	/// Convert a packed integer into an integer vector.
+	///
+	/// @see gtc_packing
+	/// @see int32 packInt4x8(i8vec4 const& v)
+	GLM_FUNC_DECL i8vec4 unpackInt4x8(int32 p);
+
+	/// Convert each component from an integer vector into a packed unsigned integer.
+	///
+	/// @see gtc_packing
+	/// @see u8vec4 unpackUint4x8(uint32 p)
+	GLM_FUNC_DECL uint32 packUint4x8(u8vec4 const& v);
+
+	/// Convert a packed integer into an integer vector.
+	///
+	/// @see gtc_packing
+	/// @see uint32 packUint4x8(u8vec4 const& v)
+	GLM_FUNC_DECL u8vec4 unpackUint4x8(uint32 p);
+
+	/// Convert each component from an integer vector into a packed integer.
+	///
+	/// @see gtc_packing
+	/// @see i16vec2 unpackInt2x16(int p)
+	GLM_FUNC_DECL int packInt2x16(i16vec2 const& v);
+
+	/// Convert a packed integer into an integer vector.
+	///
+	/// @see gtc_packing
+	/// @see int packInt2x16(i16vec2 const& v)
+	GLM_FUNC_DECL i16vec2 unpackInt2x16(int p);
+
+	/// Convert each component from an integer vector into a packed integer.
+	///
+	/// @see gtc_packing
+	/// @see i16vec4 unpackInt4x16(int64 p)
+	GLM_FUNC_DECL int64 packInt4x16(i16vec4 const& v);
+
+	/// Convert a packed integer into an integer vector.
+	///
+	/// @see gtc_packing
+	/// @see int64 packInt4x16(i16vec4 const& v)
+	GLM_FUNC_DECL i16vec4 unpackInt4x16(int64 p);
+
+	/// Convert each component from an integer vector into a packed unsigned integer.
+	///
+	/// @see gtc_packing
+	/// @see u16vec2 unpackUint2x16(uint p)
+	GLM_FUNC_DECL uint packUint2x16(u16vec2 const& v);
+
+	/// Convert a packed integer into an integer vector.
+	///
+	/// @see gtc_packing
+	/// @see uint packUint2x16(u16vec2 const& v)
+	GLM_FUNC_DECL u16vec2 unpackUint2x16(uint p);
+
+	/// Convert each component from an integer vector into a packed unsigned integer.
+	///
+	/// @see gtc_packing
+	/// @see u16vec4 unpackUint4x16(uint64 p)
+	GLM_FUNC_DECL uint64 packUint4x16(u16vec4 const& v);
+
+	/// Convert a packed integer into an integer vector.
+	///
+	/// @see gtc_packing
+	/// @see uint64 packUint4x16(u16vec4 const& v)
+	GLM_FUNC_DECL u16vec4 unpackUint4x16(uint64 p);
+
+	/// Convert each component from an integer vector into a packed integer.
+	///
+	/// @see gtc_packing
+	/// @see i32vec2 unpackInt2x32(int64 p)
+	GLM_FUNC_DECL int64 packInt2x32(i32vec2 const& v);
+
+	/// Convert a packed integer into an integer vector.
+	///
+	/// @see gtc_packing
+	/// @see int64 packInt2x32(i32vec2 const& v)
+	GLM_FUNC_DECL i32vec2 unpackInt2x32(int64 p);
+
+	/// Convert each component from an integer vector into a packed unsigned integer.
+	///
+	/// @see gtc_packing
+	/// @see u32vec2 unpackUint2x32(uint64 p)
+	GLM_FUNC_DECL uint64 packUint2x32(u32vec2 const& v);
+
+	/// Convert a packed integer into an integer vector.
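+	///
+	/// A round-trip sketch for the widest pack:
+	/// @code
+	/// glm::uint64 const p = glm::packUint2x32(glm::u32vec2(0xDEADBEEFu, 42u));
+	/// glm::u32vec2 const v = glm::unpackUint2x32(p); // u32vec2(0xDEADBEEF, 42)
+	/// @endcode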
+ /// + /// @see gtc_packing + /// @see int packUint2x16(u32vec2 const& v) + GLM_FUNC_DECL u32vec2 unpackUint2x32(uint64 p); + + /// @} +}// namespace glm + +#include "packing.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/packing.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/packing.inl new file mode 100644 index 000000000000..84ad60c70f85 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/packing.inl @@ -0,0 +1,938 @@ +/// @ref gtc_packing + +#include "../ext/scalar_relational.hpp" +#include "../ext/vector_relational.hpp" +#include "../common.hpp" +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../detail/type_half.hpp" +#include +#include + +namespace glm{ +namespace detail +{ + GLM_FUNC_QUALIFIER glm::uint16 float2half(glm::uint32 f) + { + // 10 bits => EE EEEFFFFF + // 11 bits => EEE EEFFFFFF + // Half bits => SEEEEEFF FFFFFFFF + // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF + + // 0x00007c00 => 00000000 00000000 01111100 00000000 + // 0x000003ff => 00000000 00000000 00000011 11111111 + // 0x38000000 => 00111000 00000000 00000000 00000000 + // 0x7f800000 => 01111111 10000000 00000000 00000000 + // 0x00008000 => 00000000 00000000 10000000 00000000 + return + ((f >> 16) & 0x8000) | // sign + ((((f & 0x7f800000) - 0x38000000) >> 13) & 0x7c00) | // exponential + ((f >> 13) & 0x03ff); // Mantissa + } + + GLM_FUNC_QUALIFIER glm::uint32 float2packed11(glm::uint32 f) + { + // 10 bits => EE EEEFFFFF + // 11 bits => EEE EEFFFFFF + // Half bits => SEEEEEFF FFFFFFFF + // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF + + // 0x000007c0 => 00000000 00000000 00000111 11000000 + // 0x00007c00 => 00000000 00000000 01111100 00000000 + // 0x000003ff => 00000000 00000000 00000011 11111111 + // 0x38000000 => 00111000 00000000 00000000 00000000 + // 0x7f800000 => 01111111 10000000 00000000 00000000 + // 0x00008000 => 00000000 00000000 10000000 00000000 + return + ((((f & 0x7f800000) - 0x38000000) >> 17) & 0x07c0) | // exponential + ((f >> 17) & 0x003f); // Mantissa + } + + GLM_FUNC_QUALIFIER glm::uint32 packed11ToFloat(glm::uint32 p) + { + // 10 bits => EE EEEFFFFF + // 11 bits => EEE EEFFFFFF + // Half bits => SEEEEEFF FFFFFFFF + // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF + + // 0x000007c0 => 00000000 00000000 00000111 11000000 + // 0x00007c00 => 00000000 00000000 01111100 00000000 + // 0x000003ff => 00000000 00000000 00000011 11111111 + // 0x38000000 => 00111000 00000000 00000000 00000000 + // 0x7f800000 => 01111111 10000000 00000000 00000000 + // 0x00008000 => 00000000 00000000 10000000 00000000 + return + ((((p & 0x07c0) << 17) + 0x38000000) & 0x7f800000) | // exponential + ((p & 0x003f) << 17); // Mantissa + } + + GLM_FUNC_QUALIFIER glm::uint32 float2packed10(glm::uint32 f) + { + // 10 bits => EE EEEFFFFF + // 11 bits => EEE EEFFFFFF + // Half bits => SEEEEEFF FFFFFFFF + // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF + + // 0x0000001F => 00000000 00000000 00000000 00011111 + // 0x0000003F => 00000000 00000000 00000000 00111111 + // 0x000003E0 => 00000000 00000000 00000011 11100000 + // 0x000007C0 => 00000000 00000000 00000111 11000000 + // 0x00007C00 => 00000000 00000000 01111100 00000000 + // 0x000003FF => 00000000 00000000 00000011 11111111 + // 0x38000000 => 00111000 00000000 00000000 00000000 + // 0x7f800000 => 01111111 10000000 00000000 00000000 + // 0x00008000 => 00000000 00000000 10000000 00000000 + return + ((((f & 0x7f800000) - 0x38000000) >> 18) & 0x03E0) | // exponential + ((f >> 18) & 0x001f); // 
Mantissa + } + + GLM_FUNC_QUALIFIER glm::uint32 packed10ToFloat(glm::uint32 p) + { + // 10 bits => EE EEEFFFFF + // 11 bits => EEE EEFFFFFF + // Half bits => SEEEEEFF FFFFFFFF + // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF + + // 0x0000001F => 00000000 00000000 00000000 00011111 + // 0x0000003F => 00000000 00000000 00000000 00111111 + // 0x000003E0 => 00000000 00000000 00000011 11100000 + // 0x000007C0 => 00000000 00000000 00000111 11000000 + // 0x00007C00 => 00000000 00000000 01111100 00000000 + // 0x000003FF => 00000000 00000000 00000011 11111111 + // 0x38000000 => 00111000 00000000 00000000 00000000 + // 0x7f800000 => 01111111 10000000 00000000 00000000 + // 0x00008000 => 00000000 00000000 10000000 00000000 + return + ((((p & 0x03E0) << 18) + 0x38000000) & 0x7f800000) | // exponential + ((p & 0x001f) << 18); // Mantissa + } + + GLM_FUNC_QUALIFIER glm::uint half2float(glm::uint h) + { + return ((h & 0x8000) << 16) | ((( h & 0x7c00) + 0x1C000) << 13) | ((h & 0x03FF) << 13); + } + + GLM_FUNC_QUALIFIER glm::uint floatTo11bit(float x) + { + if(x == 0.0f) + return 0u; + else if(glm::isnan(x)) + return ~0u; + else if(glm::isinf(x)) + return 0x1Fu << 6u; + + uint Pack = 0u; + memcpy(&Pack, &x, sizeof(Pack)); + return float2packed11(Pack); + } + + GLM_FUNC_QUALIFIER float packed11bitToFloat(glm::uint x) + { + if(x == 0) + return 0.0f; + else if(x == ((1 << 11) - 1)) + return ~0;//NaN + else if(x == (0x1f << 6)) + return ~0;//Inf + + uint Result = packed11ToFloat(x); + + float Temp = 0; + memcpy(&Temp, &Result, sizeof(Temp)); + return Temp; + } + + GLM_FUNC_QUALIFIER glm::uint floatTo10bit(float x) + { + if(x == 0.0f) + return 0u; + else if(glm::isnan(x)) + return ~0u; + else if(glm::isinf(x)) + return 0x1Fu << 5u; + + uint Pack = 0; + memcpy(&Pack, &x, sizeof(Pack)); + return float2packed10(Pack); + } + + GLM_FUNC_QUALIFIER float packed10bitToFloat(glm::uint x) + { + if(x == 0) + return 0.0f; + else if(x == ((1 << 10) - 1)) + return ~0;//NaN + else if(x == (0x1f << 5)) + return ~0;//Inf + + uint Result = packed10ToFloat(x); + + float Temp = 0; + memcpy(&Temp, &Result, sizeof(Temp)); + return Temp; + } + +// GLM_FUNC_QUALIFIER glm::uint f11_f11_f10(float x, float y, float z) +// { +// return ((floatTo11bit(x) & ((1 << 11) - 1)) << 0) | ((floatTo11bit(y) & ((1 << 11) - 1)) << 11) | ((floatTo10bit(z) & ((1 << 10) - 1)) << 22); +// } + + union u3u3u2 + { + struct + { + uint x : 3; + uint y : 3; + uint z : 2; + } data; + uint8 pack; + }; + + union u4u4 + { + struct + { + uint x : 4; + uint y : 4; + } data; + uint8 pack; + }; + + union u4u4u4u4 + { + struct + { + uint x : 4; + uint y : 4; + uint z : 4; + uint w : 4; + } data; + uint16 pack; + }; + + union u5u6u5 + { + struct + { + uint x : 5; + uint y : 6; + uint z : 5; + } data; + uint16 pack; + }; + + union u5u5u5u1 + { + struct + { + uint x : 5; + uint y : 5; + uint z : 5; + uint w : 1; + } data; + uint16 pack; + }; + + union u10u10u10u2 + { + struct + { + uint x : 10; + uint y : 10; + uint z : 10; + uint w : 2; + } data; + uint32 pack; + }; + + union i10i10i10i2 + { + struct + { + int x : 10; + int y : 10; + int z : 10; + int w : 2; + } data; + uint32 pack; + }; + + union u9u9u9e5 + { + struct + { + uint x : 9; + uint y : 9; + uint z : 9; + uint w : 5; + } data; + uint32 pack; + }; + + template + struct compute_half + {}; + + template + struct compute_half<1, Q> + { + GLM_FUNC_QUALIFIER static vec<1, uint16, Q> pack(vec<1, float, Q> const& v) + { + int16 const Unpack(detail::toFloat16(v.x)); + u16vec1 Packed; + memcpy(&Packed, &Unpack, 
sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER static vec<1, float, Q> unpack(vec<1, uint16, Q> const& v) + { + i16vec1 Unpack; + memcpy(&Unpack, &v, sizeof(Unpack)); + return vec<1, float, Q>(detail::toFloat32(v.x)); + } + }; + + template + struct compute_half<2, Q> + { + GLM_FUNC_QUALIFIER static vec<2, uint16, Q> pack(vec<2, float, Q> const& v) + { + vec<2, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y)); + u16vec2 Packed; + memcpy(&Packed, &Unpack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER static vec<2, float, Q> unpack(vec<2, uint16, Q> const& v) + { + i16vec2 Unpack; + memcpy(&Unpack, &v, sizeof(Unpack)); + return vec<2, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y)); + } + }; + + template + struct compute_half<3, Q> + { + GLM_FUNC_QUALIFIER static vec<3, uint16, Q> pack(vec<3, float, Q> const& v) + { + vec<3, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y), detail::toFloat16(v.z)); + u16vec3 Packed; + memcpy(&Packed, &Unpack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER static vec<3, float, Q> unpack(vec<3, uint16, Q> const& v) + { + i16vec3 Unpack; + memcpy(&Unpack, &v, sizeof(Unpack)); + return vec<3, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y), detail::toFloat32(v.z)); + } + }; + + template + struct compute_half<4, Q> + { + GLM_FUNC_QUALIFIER static vec<4, uint16, Q> pack(vec<4, float, Q> const& v) + { + vec<4, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y), detail::toFloat16(v.z), detail::toFloat16(v.w)); + u16vec4 Packed; + memcpy(&Packed, &Unpack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER static vec<4, float, Q> unpack(vec<4, uint16, Q> const& v) + { + i16vec4 Unpack; + memcpy(&Unpack, &v, sizeof(Unpack)); + return vec<4, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y), detail::toFloat32(v.z), detail::toFloat32(v.w)); + } + }; +}//namespace detail + + GLM_FUNC_QUALIFIER uint8 packUnorm1x8(float v) + { + return static_cast(round(clamp(v, 0.0f, 1.0f) * 255.0f)); + } + + GLM_FUNC_QUALIFIER float unpackUnorm1x8(uint8 p) + { + float const Unpack(p); + return Unpack * static_cast(0.0039215686274509803921568627451); // 1 / 255 + } + + GLM_FUNC_QUALIFIER uint16 packUnorm2x8(vec2 const& v) + { + u8vec2 const Topack(round(clamp(v, 0.0f, 1.0f) * 255.0f)); + + uint16 Unpack = 0; + memcpy(&Unpack, &Topack, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER vec2 unpackUnorm2x8(uint16 p) + { + u8vec2 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return vec2(Unpack) * float(0.0039215686274509803921568627451); // 1 / 255 + } + + GLM_FUNC_QUALIFIER uint8 packSnorm1x8(float v) + { + int8 const Topack(static_cast(round(clamp(v ,-1.0f, 1.0f) * 127.0f))); + uint8 Packed = 0; + memcpy(&Packed, &Topack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER float unpackSnorm1x8(uint8 p) + { + int8 Unpack = 0; + memcpy(&Unpack, &p, sizeof(Unpack)); + return clamp( + static_cast(Unpack) * 0.00787401574803149606299212598425f, // 1.0f / 127.0f + -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint16 packSnorm2x8(vec2 const& v) + { + i8vec2 const Topack(round(clamp(v, -1.0f, 1.0f) * 127.0f)); + uint16 Packed = 0; + memcpy(&Packed, &Topack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER vec2 unpackSnorm2x8(uint16 p) + { + i8vec2 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return clamp( + vec2(Unpack) * 0.00787401574803149606299212598425f, // 1.0f / 127.0f + -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint16 
packUnorm1x16(float s) + { + return static_cast(round(clamp(s, 0.0f, 1.0f) * 65535.0f)); + } + + GLM_FUNC_QUALIFIER float unpackUnorm1x16(uint16 p) + { + float const Unpack(p); + return Unpack * 1.5259021896696421759365224689097e-5f; // 1.0 / 65535.0 + } + + GLM_FUNC_QUALIFIER uint64 packUnorm4x16(vec4 const& v) + { + u16vec4 const Topack(round(clamp(v , 0.0f, 1.0f) * 65535.0f)); + uint64 Packed = 0; + memcpy(&Packed, &Topack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER vec4 unpackUnorm4x16(uint64 p) + { + u16vec4 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return vec4(Unpack) * 1.5259021896696421759365224689097e-5f; // 1.0 / 65535.0 + } + + GLM_FUNC_QUALIFIER uint16 packSnorm1x16(float v) + { + int16 const Topack = static_cast(round(clamp(v ,-1.0f, 1.0f) * 32767.0f)); + uint16 Packed = 0; + memcpy(&Packed, &Topack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER float unpackSnorm1x16(uint16 p) + { + int16 Unpack = 0; + memcpy(&Unpack, &p, sizeof(Unpack)); + return clamp( + static_cast(Unpack) * 3.0518509475997192297128208258309e-5f, //1.0f / 32767.0f, + -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint64 packSnorm4x16(vec4 const& v) + { + i16vec4 const Topack(round(clamp(v ,-1.0f, 1.0f) * 32767.0f)); + uint64 Packed = 0; + memcpy(&Packed, &Topack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER vec4 unpackSnorm4x16(uint64 p) + { + i16vec4 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return clamp( + vec4(Unpack) * 3.0518509475997192297128208258309e-5f, //1.0f / 32767.0f, + -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint16 packHalf1x16(float v) + { + int16 const Topack(detail::toFloat16(v)); + uint16 Packed = 0; + memcpy(&Packed, &Topack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER float unpackHalf1x16(uint16 v) + { + int16 Unpack = 0; + memcpy(&Unpack, &v, sizeof(Unpack)); + return detail::toFloat32(Unpack); + } + + GLM_FUNC_QUALIFIER uint64 packHalf4x16(glm::vec4 const& v) + { + i16vec4 const Unpack( + detail::toFloat16(v.x), + detail::toFloat16(v.y), + detail::toFloat16(v.z), + detail::toFloat16(v.w)); + uint64 Packed = 0; + memcpy(&Packed, &Unpack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER glm::vec4 unpackHalf4x16(uint64 v) + { + i16vec4 Unpack; + memcpy(&Unpack, &v, sizeof(Unpack)); + return vec4( + detail::toFloat32(Unpack.x), + detail::toFloat32(Unpack.y), + detail::toFloat32(Unpack.z), + detail::toFloat32(Unpack.w)); + } + + GLM_FUNC_QUALIFIER uint32 packI3x10_1x2(ivec4 const& v) + { + detail::i10i10i10i2 Result; + Result.data.x = v.x; + Result.data.y = v.y; + Result.data.z = v.z; + Result.data.w = v.w; + return Result.pack; + } + + GLM_FUNC_QUALIFIER ivec4 unpackI3x10_1x2(uint32 v) + { + detail::i10i10i10i2 Unpack; + Unpack.pack = v; + return ivec4( + Unpack.data.x, + Unpack.data.y, + Unpack.data.z, + Unpack.data.w); + } + + GLM_FUNC_QUALIFIER uint32 packU3x10_1x2(uvec4 const& v) + { + detail::u10u10u10u2 Result; + Result.data.x = v.x; + Result.data.y = v.y; + Result.data.z = v.z; + Result.data.w = v.w; + return Result.pack; + } + + GLM_FUNC_QUALIFIER uvec4 unpackU3x10_1x2(uint32 v) + { + detail::u10u10u10u2 Unpack; + Unpack.pack = v; + return uvec4( + Unpack.data.x, + Unpack.data.y, + Unpack.data.z, + Unpack.data.w); + } + + GLM_FUNC_QUALIFIER uint32 packSnorm3x10_1x2(vec4 const& v) + { + ivec4 const Pack(round(clamp(v,-1.0f, 1.0f) * vec4(511.f, 511.f, 511.f, 1.f))); + + detail::i10i10i10i2 Result; + Result.data.x = Pack.x; + Result.data.y = Pack.y; + Result.data.z = Pack.z; + Result.data.w = Pack.w; 
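+		// Reading .pack aliases the four signed bitfields as a single 32-bit word,
+		// matching the 10/10/10/2 layout filled in above.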
+ return Result.pack; + } + + GLM_FUNC_QUALIFIER vec4 unpackSnorm3x10_1x2(uint32 v) + { + detail::i10i10i10i2 Unpack; + Unpack.pack = v; + + vec4 const Result(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w); + + return clamp(Result * vec4(1.f / 511.f, 1.f / 511.f, 1.f / 511.f, 1.f), -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint32 packUnorm3x10_1x2(vec4 const& v) + { + uvec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec4(1023.f, 1023.f, 1023.f, 3.f))); + + detail::u10u10u10u2 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + Result.data.z = Unpack.z; + Result.data.w = Unpack.w; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec4 unpackUnorm3x10_1x2(uint32 v) + { + vec4 const ScaleFactors(1.0f / 1023.f, 1.0f / 1023.f, 1.0f / 1023.f, 1.0f / 3.f); + + detail::u10u10u10u2 Unpack; + Unpack.pack = v; + return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactors; + } + + GLM_FUNC_QUALIFIER uint32 packF2x11_1x10(vec3 const& v) + { + return + ((detail::floatTo11bit(v.x) & ((1 << 11) - 1)) << 0) | + ((detail::floatTo11bit(v.y) & ((1 << 11) - 1)) << 11) | + ((detail::floatTo10bit(v.z) & ((1 << 10) - 1)) << 22); + } + + GLM_FUNC_QUALIFIER vec3 unpackF2x11_1x10(uint32 v) + { + return vec3( + detail::packed11bitToFloat(v >> 0), + detail::packed11bitToFloat(v >> 11), + detail::packed10bitToFloat(v >> 22)); + } + + GLM_FUNC_QUALIFIER uint32 packF3x9_E1x5(vec3 const& v) + { + float const SharedExpMax = (pow(2.0f, 9.0f - 1.0f) / pow(2.0f, 9.0f)) * pow(2.0f, 31.f - 15.f); + vec3 const Color = clamp(v, 0.0f, SharedExpMax); + float const MaxColor = max(Color.x, max(Color.y, Color.z)); + + float const ExpSharedP = max(-15.f - 1.f, floor(log2(MaxColor))) + 1.0f + 15.f; + float const MaxShared = floor(MaxColor / pow(2.0f, (ExpSharedP - 15.f - 9.f)) + 0.5f); + float const ExpShared = equal(MaxShared, pow(2.0f, 9.0f), epsilon()) ? 
ExpSharedP + 1.0f : ExpSharedP; + + uvec3 const ColorComp(floor(Color / pow(2.f, (ExpShared - 15.f - 9.f)) + 0.5f)); + + detail::u9u9u9e5 Unpack; + Unpack.data.x = ColorComp.x; + Unpack.data.y = ColorComp.y; + Unpack.data.z = ColorComp.z; + Unpack.data.w = uint(ExpShared); + return Unpack.pack; + } + + GLM_FUNC_QUALIFIER vec3 unpackF3x9_E1x5(uint32 v) + { + detail::u9u9u9e5 Unpack; + Unpack.pack = v; + + return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * pow(2.0f, Unpack.data.w - 15.f - 9.f); + } + + // Based on Brian Karis http://graphicrants.blogspot.fr/2009/04/rgbm-color-encoding.html + template + GLM_FUNC_QUALIFIER vec<4, T, Q> packRGBM(vec<3, T, Q> const& rgb) + { + vec<3, T, Q> const Color(rgb * static_cast(1.0 / 6.0)); + T Alpha = clamp(max(max(Color.x, Color.y), max(Color.z, static_cast(1e-6))), static_cast(0), static_cast(1)); + Alpha = ceil(Alpha * static_cast(255.0)) / static_cast(255.0); + return vec<4, T, Q>(Color / Alpha, Alpha); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& rgbm) + { + return vec<3, T, Q>(rgbm.x, rgbm.y, rgbm.z) * rgbm.w * static_cast(6); + } + + template + GLM_FUNC_QUALIFIER vec packHalf(vec const& v) + { + return detail::compute_half::pack(v); + } + + template + GLM_FUNC_QUALIFIER vec unpackHalf(vec const& v) + { + return detail::compute_half::unpack(v); + } + + template + GLM_FUNC_QUALIFIER vec packUnorm(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); + + return vec(round(clamp(v, static_cast(0), static_cast(1)) * static_cast(std::numeric_limits::max()))); + } + + template + GLM_FUNC_QUALIFIER vec unpackUnorm(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); + + return vec(v) * (static_cast(1) / static_cast(std::numeric_limits::max())); + } + + template + GLM_FUNC_QUALIFIER vec packSnorm(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); + + return vec(round(clamp(v , static_cast(-1), static_cast(1)) * static_cast(std::numeric_limits::max()))); + } + + template + GLM_FUNC_QUALIFIER vec unpackSnorm(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); + + return clamp(vec(v) * (static_cast(1) / static_cast(std::numeric_limits::max())), static_cast(-1), static_cast(1)); + } + + GLM_FUNC_QUALIFIER uint8 packUnorm2x4(vec2 const& v) + { + u32vec2 const Unpack(round(clamp(v, 0.0f, 1.0f) * 15.0f)); + detail::u4u4 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec2 unpackUnorm2x4(uint8 v) + { + float const ScaleFactor(1.f / 15.f); + detail::u4u4 Unpack; + Unpack.pack = v; + return vec2(Unpack.data.x, Unpack.data.y) * ScaleFactor; + } + + GLM_FUNC_QUALIFIER uint16 packUnorm4x4(vec4 const& v) + { + u32vec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * 15.0f)); + detail::u4u4u4u4 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + Result.data.z = Unpack.z; + Result.data.w = Unpack.w; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec4 
unpackUnorm4x4(uint16 v) + { + float const ScaleFactor(1.f / 15.f); + detail::u4u4u4u4 Unpack; + Unpack.pack = v; + return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactor; + } + + GLM_FUNC_QUALIFIER uint16 packUnorm1x5_1x6_1x5(vec3 const& v) + { + u32vec3 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec3(31.f, 63.f, 31.f))); + detail::u5u6u5 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + Result.data.z = Unpack.z; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec3 unpackUnorm1x5_1x6_1x5(uint16 v) + { + vec3 const ScaleFactor(1.f / 31.f, 1.f / 63.f, 1.f / 31.f); + detail::u5u6u5 Unpack; + Unpack.pack = v; + return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * ScaleFactor; + } + + GLM_FUNC_QUALIFIER uint16 packUnorm3x5_1x1(vec4 const& v) + { + u32vec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec4(31.f, 31.f, 31.f, 1.f))); + detail::u5u5u5u1 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + Result.data.z = Unpack.z; + Result.data.w = Unpack.w; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec4 unpackUnorm3x5_1x1(uint16 v) + { + vec4 const ScaleFactor(1.f / 31.f, 1.f / 31.f, 1.f / 31.f, 1.f); + detail::u5u5u5u1 Unpack; + Unpack.pack = v; + return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactor; + } + + GLM_FUNC_QUALIFIER uint8 packUnorm2x3_1x2(vec3 const& v) + { + u32vec3 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec3(7.f, 7.f, 3.f))); + detail::u3u3u2 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + Result.data.z = Unpack.z; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec3 unpackUnorm2x3_1x2(uint8 v) + { + vec3 const ScaleFactor(1.f / 7.f, 1.f / 7.f, 1.f / 3.f); + detail::u3u3u2 Unpack; + Unpack.pack = v; + return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * ScaleFactor; + } + + GLM_FUNC_QUALIFIER int16 packInt2x8(i8vec2 const& v) + { + int16 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER i8vec2 unpackInt2x8(int16 p) + { + i8vec2 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER uint16 packUint2x8(u8vec2 const& v) + { + uint16 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER u8vec2 unpackUint2x8(uint16 p) + { + u8vec2 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER int32 packInt4x8(i8vec4 const& v) + { + int32 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER i8vec4 unpackInt4x8(int32 p) + { + i8vec4 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER uint32 packUint4x8(u8vec4 const& v) + { + uint32 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER u8vec4 unpackUint4x8(uint32 p) + { + u8vec4 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER int packInt2x16(i16vec2 const& v) + { + int Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER i16vec2 unpackInt2x16(int p) + { + i16vec2 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER int64 packInt4x16(i16vec4 const& v) + { + int64 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER i16vec4 unpackInt4x16(int64 p) + { + i16vec4 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER uint packUint2x16(u16vec2 const& v) + { + uint Pack = 0; + memcpy(&Pack, &v, 
sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER u16vec2 unpackUint2x16(uint p) + { + u16vec2 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER uint64 packUint4x16(u16vec4 const& v) + { + uint64 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER u16vec4 unpackUint4x16(uint64 p) + { + u16vec4 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER int64 packInt2x32(i32vec2 const& v) + { + int64 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER i32vec2 unpackInt2x32(int64 p) + { + i32vec2 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER uint64 packUint2x32(u32vec2 const& v) + { + uint64 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER u32vec2 unpackUint2x32(uint64 p) + { + u32vec2 Unpack; + memcpy(&Unpack, &p, sizeof(Unpack)); + return Unpack; + } +}//namespace glm + diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion.hpp new file mode 100644 index 000000000000..314449ebd97e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion.hpp @@ -0,0 +1,173 @@ +/// @ref gtc_quaternion +/// @file glm/gtc/quaternion.hpp +/// +/// @see core (dependence) +/// @see gtc_constants (dependence) +/// +/// @defgroup gtc_quaternion GLM_GTC_quaternion +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Defines a templated quaternion type and several quaternion operations. + +#pragma once + +// Dependency: +#include "../gtc/constants.hpp" +#include "../gtc/matrix_transform.hpp" +#include "../ext/vector_relational.hpp" +#include "../ext/quaternion_common.hpp" +#include "../ext/quaternion_float.hpp" +#include "../ext/quaternion_float_precision.hpp" +#include "../ext/quaternion_double.hpp" +#include "../ext/quaternion_double_precision.hpp" +#include "../ext/quaternion_relational.hpp" +#include "../ext/quaternion_geometric.hpp" +#include "../ext/quaternion_trigonometric.hpp" +#include "../ext/quaternion_transform.hpp" +#include "../detail/type_mat3x3.hpp" +#include "../detail/type_mat4x4.hpp" +#include "../detail/type_vec3.hpp" +#include "../detail/type_vec4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_quaternion extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_quaternion + /// @{ + + /// Returns euler angles, pitch as x, yaw as y, roll as z. + /// The result is expressed in radians. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL vec<3, T, Q> eulerAngles(qua const& x); + + /// Returns roll value of euler angles expressed in radians. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL T roll(qua const& x); + + /// Returns pitch value of euler angles expressed in radians. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL T pitch(qua const& x); + + /// Returns yaw value of euler angles expressed in radians. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL T yaw(qua const& x); + + /// Converts a quaternion to a 3 * 3 matrix. + /// + /// @tparam T Floating-point scalar types. 
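+	///
+	/// A conversion round-trip sketch (the angle and axis below are arbitrary examples):
+	/// @code
+	/// glm::quat const q = glm::angleAxis(glm::radians(45.0f), glm::vec3(0, 1, 0));
+	/// glm::mat3 const m = glm::mat3_cast(q);
+	/// glm::quat const q2 = glm::quat_cast(m); // recovers q up to sign and rounding
+	/// @endcode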
+ /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL mat<3, 3, T, Q> mat3_cast(qua const& x); + + /// Converts a quaternion to a 4 * 4 matrix. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL mat<4, 4, T, Q> mat4_cast(qua const& x); + + /// Converts a pure rotation 3 * 3 matrix to a quaternion. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL qua quat_cast(mat<3, 3, T, Q> const& x); + + /// Converts a pure rotation 4 * 4 matrix to a quaternion. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL qua quat_cast(mat<4, 4, T, Q> const& x); + + /// Returns the component-wise comparison result of x < y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_relational + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> lessThan(qua const& x, qua const& y); + + /// Returns the component-wise comparison of result x <= y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_relational + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> lessThanEqual(qua const& x, qua const& y); + + /// Returns the component-wise comparison of result x > y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_relational + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> greaterThan(qua const& x, qua const& y); + + /// Returns the component-wise comparison of result x >= y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_relational + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> greaterThanEqual(qua const& x, qua const& y); + + /// Build a look at quaternion based on the default handedness. + /// + /// @param direction Desired forward direction. Needs to be normalized. + /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). + template + GLM_FUNC_DECL qua quatLookAt( + vec<3, T, Q> const& direction, + vec<3, T, Q> const& up); + + /// Build a right-handed look at quaternion. + /// + /// @param direction Desired forward direction onto which the -z-axis gets mapped. Needs to be normalized. + /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). + template + GLM_FUNC_DECL qua quatLookAtRH( + vec<3, T, Q> const& direction, + vec<3, T, Q> const& up); + + /// Build a left-handed look at quaternion. + /// + /// @param direction Desired forward direction onto which the +z-axis gets mapped. Needs to be normalized. + /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). 
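+	///
+	/// A usage sketch through the handedness-dispatching wrapper; the direction is an arbitrary normalized example:
+	/// @code
+	/// glm::vec3 const dir = glm::normalize(glm::vec3(1.0f, 0.0f, 1.0f));
+	/// glm::quat const orientation = glm::quatLookAt(dir, glm::vec3(0, 1, 0));
+	/// @endcode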
+ template + GLM_FUNC_DECL qua quatLookAtLH( + vec<3, T, Q> const& direction, + vec<3, T, Q> const& up); + /// @} +} //namespace glm + +#include "quaternion.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion.inl new file mode 100644 index 000000000000..ea159f298819 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion.inl @@ -0,0 +1,208 @@ +#include "../trigonometric.hpp" +#include "../geometric.hpp" +#include "../exponential.hpp" +#include "epsilon.hpp" +#include + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> eulerAngles(qua const& x) + { + return vec<3, T, Q>(pitch(x), yaw(x), roll(x)); + } + + template + GLM_FUNC_QUALIFIER T roll(qua const& q) + { + T const y = static_cast(2) * (q.x * q.y + q.w * q.z); + T const x = q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z; + + if(all(equal(vec<2, T, Q>(x, y), vec<2, T, Q>(0), epsilon()))) //avoid atan2(0,0) - handle singularity - Matiis + return static_cast(0); + + return static_cast(atan(y, x)); + } + + template + GLM_FUNC_QUALIFIER T pitch(qua const& q) + { + //return T(atan(T(2) * (q.y * q.z + q.w * q.x), q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z)); + T const y = static_cast(2) * (q.y * q.z + q.w * q.x); + T const x = q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z; + + if(all(equal(vec<2, T, Q>(x, y), vec<2, T, Q>(0), epsilon()))) //avoid atan2(0,0) - handle singularity - Matiis + return static_cast(static_cast(2) * atan(q.x, q.w)); + + return static_cast(atan(y, x)); + } + + template + GLM_FUNC_QUALIFIER T yaw(qua const& q) + { + return asin(clamp(static_cast(-2) * (q.x * q.z - q.w * q.y), static_cast(-1), static_cast(1))); + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat3_cast(qua const& q) + { + mat<3, 3, T, Q> Result(T(1)); + T qxx(q.x * q.x); + T qyy(q.y * q.y); + T qzz(q.z * q.z); + T qxz(q.x * q.z); + T qxy(q.x * q.y); + T qyz(q.y * q.z); + T qwx(q.w * q.x); + T qwy(q.w * q.y); + T qwz(q.w * q.z); + + Result[0][0] = T(1) - T(2) * (qyy + qzz); + Result[0][1] = T(2) * (qxy + qwz); + Result[0][2] = T(2) * (qxz - qwy); + + Result[1][0] = T(2) * (qxy - qwz); + Result[1][1] = T(1) - T(2) * (qxx + qzz); + Result[1][2] = T(2) * (qyz + qwx); + + Result[2][0] = T(2) * (qxz + qwy); + Result[2][1] = T(2) * (qyz - qwx); + Result[2][2] = T(1) - T(2) * (qxx + qyy); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat4_cast(qua const& q) + { + return mat<4, 4, T, Q>(mat3_cast(q)); + } + + template + GLM_FUNC_QUALIFIER qua quat_cast(mat<3, 3, T, Q> const& m) + { + T fourXSquaredMinus1 = m[0][0] - m[1][1] - m[2][2]; + T fourYSquaredMinus1 = m[1][1] - m[0][0] - m[2][2]; + T fourZSquaredMinus1 = m[2][2] - m[0][0] - m[1][1]; + T fourWSquaredMinus1 = m[0][0] + m[1][1] + m[2][2]; + + int biggestIndex = 0; + T fourBiggestSquaredMinus1 = fourWSquaredMinus1; + if(fourXSquaredMinus1 > fourBiggestSquaredMinus1) + { + fourBiggestSquaredMinus1 = fourXSquaredMinus1; + biggestIndex = 1; + } + if(fourYSquaredMinus1 > fourBiggestSquaredMinus1) + { + fourBiggestSquaredMinus1 = fourYSquaredMinus1; + biggestIndex = 2; + } + if(fourZSquaredMinus1 > fourBiggestSquaredMinus1) + { + fourBiggestSquaredMinus1 = fourZSquaredMinus1; + biggestIndex = 3; + } + + T biggestVal = sqrt(fourBiggestSquaredMinus1 + static_cast(1)) * static_cast(0.5); + T mult = static_cast(0.25) / biggestVal; + + switch(biggestIndex) + { + case 0: + return qua::wxyz(biggestVal, (m[1][2] - m[2][1]) * mult, (m[2][0] - m[0][2]) * mult, (m[0][1] - m[1][0]) * 
mult); + case 1: + return qua::wxyz((m[1][2] - m[2][1]) * mult, biggestVal, (m[0][1] + m[1][0]) * mult, (m[2][0] + m[0][2]) * mult); + case 2: + return qua::wxyz((m[2][0] - m[0][2]) * mult, (m[0][1] + m[1][0]) * mult, biggestVal, (m[1][2] + m[2][1]) * mult); + case 3: + return qua::wxyz((m[0][1] - m[1][0]) * mult, (m[2][0] + m[0][2]) * mult, (m[1][2] + m[2][1]) * mult, biggestVal); + default: // Silence a -Wswitch-default warning in GCC. Should never actually get here. Assert is just for sanity. + assert(false); + return qua::wxyz(1, 0, 0, 0); + } + } + + template + GLM_FUNC_QUALIFIER qua quat_cast(mat<4, 4, T, Q> const& m4) + { + return quat_cast(mat<3, 3, T, Q>(m4)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> lessThan(qua const& x, qua const& y) + { + vec<4, bool, Q> Result(false, false, false, false); + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] < y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> lessThanEqual(qua const& x, qua const& y) + { + vec<4, bool, Q> Result(false, false, false, false); + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] <= y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> greaterThan(qua const& x, qua const& y) + { + vec<4, bool, Q> Result(false, false, false, false); + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] > y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> greaterThanEqual(qua const& x, qua const& y) + { + vec<4, bool, Q> Result(false, false, false, false); + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] >= y[i]; + return Result; + } + + + template + GLM_FUNC_QUALIFIER qua quatLookAt(vec<3, T, Q> const& direction, vec<3, T, Q> const& up) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT + return quatLookAtLH(direction, up); +# else + return quatLookAtRH(direction, up); +# endif + } + + template + GLM_FUNC_QUALIFIER qua quatLookAtRH(vec<3, T, Q> const& direction, vec<3, T, Q> const& up) + { + mat<3, 3, T, Q> Result; + + Result[2] = -direction; + vec<3, T, Q> const& Right = cross(up, Result[2]); + Result[0] = Right * inversesqrt(max(static_cast(0.00001), dot(Right, Right))); + Result[1] = cross(Result[2], Result[0]); + + return quat_cast(Result); + } + + template + GLM_FUNC_QUALIFIER qua quatLookAtLH(vec<3, T, Q> const& direction, vec<3, T, Q> const& up) + { + mat<3, 3, T, Q> Result; + + Result[2] = direction; + vec<3, T, Q> const& Right = cross(up, Result[2]); + Result[0] = Right * inversesqrt(max(static_cast(0.00001), dot(Right, Right))); + Result[1] = cross(Result[2], Result[0]); + + return quat_cast(Result); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "quaternion_simd.inl" +#endif + diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion_simd.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/quaternion_simd.inl new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/random.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/random.hpp new file mode 100644 index 000000000000..c6485bf1da38 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/random.hpp @@ -0,0 +1,82 @@ +/// @ref gtc_random +/// @file glm/gtc/random.hpp +/// +/// @see core (dependence) +/// @see gtx_random (extended) +/// +/// @defgroup gtc_random GLM_GTC_random +/// @ingroup gtc +/// +/// Include to use the features of this extension. 
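+///
+/// A usage sketch (results differ per run; the generator builds on std::rand):
+/// @code
+/// float const s = glm::linearRand(0.0f, 1.0f); // uniform in [0, 1]
+/// glm::vec2 const p = glm::diskRand(2.0f);     // uniform inside a disk of radius 2
+/// float const g = glm::gaussRand(0.0f, 1.0f);  // gaussian with mean 0, deviation 1
+/// @endcode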
+/// +/// Generate random number from various distribution methods. + +#pragma once + +// Dependency: +#include "../ext/scalar_int_sized.hpp" +#include "../ext/scalar_uint_sized.hpp" +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_random extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_random + /// @{ + + /// Generate random numbers in the interval [Min, Max], according a linear distribution + /// + /// @param Min Minimum value included in the sampling + /// @param Max Maximum value included in the sampling + /// @tparam genType Value type. Currently supported: float or double scalars. + /// @see gtc_random + template + GLM_FUNC_DECL genType linearRand(genType Min, genType Max); + + /// Generate random numbers in the interval [Min, Max], according a linear distribution + /// + /// @param Min Minimum value included in the sampling + /// @param Max Maximum value included in the sampling + /// @tparam T Value type. Currently supported: float or double. + /// + /// @see gtc_random + template + GLM_FUNC_DECL vec linearRand(vec const& Min, vec const& Max); + + /// Generate random numbers in the interval [Min, Max], according a gaussian distribution + /// + /// @see gtc_random + template + GLM_FUNC_DECL genType gaussRand(genType Mean, genType Deviation); + + /// Generate a random 2D vector which coordinates are regularly distributed on a circle of a given radius + /// + /// @see gtc_random + template + GLM_FUNC_DECL vec<2, T, defaultp> circularRand(T Radius); + + /// Generate a random 3D vector which coordinates are regularly distributed on a sphere of a given radius + /// + /// @see gtc_random + template + GLM_FUNC_DECL vec<3, T, defaultp> sphericalRand(T Radius); + + /// Generate a random 2D vector which coordinates are regularly distributed within the area of a disk of a given radius + /// + /// @see gtc_random + template + GLM_FUNC_DECL vec<2, T, defaultp> diskRand(T Radius); + + /// Generate a random 3D vector which coordinates are regularly distributed within the volume of a ball of a given radius + /// + /// @see gtc_random + template + GLM_FUNC_DECL vec<3, T, defaultp> ballRand(T Radius); + + /// @} +}//namespace glm + +#include "random.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/random.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/random.inl new file mode 100644 index 000000000000..249ec9f92b48 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/random.inl @@ -0,0 +1,303 @@ +#include "../geometric.hpp" +#include "../exponential.hpp" +#include "../trigonometric.hpp" +#include "../detail/type_vec1.hpp" +#include +#include +#include +#include + +namespace glm{ +namespace detail +{ + template + struct compute_rand + { + GLM_FUNC_QUALIFIER static vec call(); + }; + + template + struct compute_rand<1, uint8, P> + { + GLM_FUNC_QUALIFIER static vec<1, uint8, P> call() + { + return vec<1, uint8, P>( + static_cast(std::rand() % std::numeric_limits::max())); + } + }; + + template + struct compute_rand<2, uint8, P> + { + GLM_FUNC_QUALIFIER static vec<2, uint8, P> call() + { + return vec<2, uint8, P>( + std::rand() % std::numeric_limits::max(), + std::rand() % std::numeric_limits::max()); + } + }; + + template + struct compute_rand<3, uint8, P> + { + GLM_FUNC_QUALIFIER static vec<3, uint8, P> call() + { + return vec<3, uint8, P>( + std::rand() % std::numeric_limits::max(), + std::rand() % std::numeric_limits::max(), + std::rand() % std::numeric_limits::max()); + } + 
+
+	/// @}
+}//namespace glm
+
+#include "random.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/random.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/random.inl
new file mode 100644
index 000000000000..249ec9f92b48
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/random.inl
@@ -0,0 +1,303 @@
+#include "../geometric.hpp"
+#include "../exponential.hpp"
+#include "../trigonometric.hpp"
+#include "../detail/type_vec1.hpp"
+#include <cstdlib>
+#include <ctime>
+#include <cassert>
+#include <cmath>
+
+namespace glm{
+namespace detail
+{
+	template<length_t L, typename T, qualifier Q>
+	struct compute_rand
+	{
+		GLM_FUNC_QUALIFIER static vec<L, T, Q> call();
+	};
+
+	template<qualifier P>
+	struct compute_rand<1, uint8, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<1, uint8, P> call()
+		{
+			return vec<1, uint8, P>(
+				static_cast<uint8>(std::rand() % std::numeric_limits<uint8>::max()));
+		}
+	};
+
+	template<qualifier P>
+	struct compute_rand<2, uint8, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<2, uint8, P> call()
+		{
+			return vec<2, uint8, P>(
+				std::rand() % std::numeric_limits<uint8>::max(),
+				std::rand() % std::numeric_limits<uint8>::max());
+		}
+	};
+
+	template<qualifier P>
+	struct compute_rand<3, uint8, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<3, uint8, P> call()
+		{
+			return vec<3, uint8, P>(
+				std::rand() % std::numeric_limits<uint8>::max(),
+				std::rand() % std::numeric_limits<uint8>::max(),
+				std::rand() % std::numeric_limits<uint8>::max());
+		}
+	};
+
+	template<qualifier P>
+	struct compute_rand<4, uint8, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, uint8, P> call()
+		{
+			return vec<4, uint8, P>(
+				std::rand() % std::numeric_limits<uint8>::max(),
+				std::rand() % std::numeric_limits<uint8>::max(),
+				std::rand() % std::numeric_limits<uint8>::max(),
+				std::rand() % std::numeric_limits<uint8>::max());
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_rand<L, uint16, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, uint16, P> call()
+		{
+			return
+				(vec<L, uint16, P>(compute_rand<L, uint8, P>::call()) << static_cast<uint16>(8)) |
+				(vec<L, uint16, P>(compute_rand<L, uint8, P>::call()) << static_cast<uint16>(0));
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_rand<L, uint32, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, uint32, P> call()
+		{
+			return
+				(vec<L, uint32, P>(compute_rand<L, uint16, P>::call()) << static_cast<uint32>(16)) |
+				(vec<L, uint32, P>(compute_rand<L, uint16, P>::call()) << static_cast<uint32>(0));
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_rand<L, uint64, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, uint64, P> call()
+		{
+			return
+				(vec<L, uint64, P>(compute_rand<L, uint32, P>::call()) << static_cast<uint64>(32)) |
+				(vec<L, uint64, P>(compute_rand<L, uint32, P>::call()) << static_cast<uint64>(0));
+		}
+	};
+
+	template<length_t L, typename T, qualifier Q>
+	struct compute_linearRand
+	{
+		GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, int8, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, int8, P> call(vec<L, int8, P> const& Min, vec<L, int8, P> const& Max)
+		{
+			return (vec<L, int8, P>(compute_rand<L, uint8, P>::call() % vec<L, uint8, P>(Max + static_cast<int8>(1) - Min))) + Min;
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, uint8, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, uint8, P> call(vec<L, uint8, P> const& Min, vec<L, uint8, P> const& Max)
+		{
+			return (compute_rand<L, uint8, P>::call() % (Max + static_cast<uint8>(1) - Min)) + Min;
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, int16, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, int16, P> call(vec<L, int16, P> const& Min, vec<L, int16, P> const& Max)
+		{
+			return (vec<L, int16, P>(compute_rand<L, uint16, P>::call() % vec<L, uint16, P>(Max + static_cast<int16>(1) - Min))) + Min;
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, uint16, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, uint16, P> call(vec<L, uint16, P> const& Min, vec<L, uint16, P> const& Max)
+		{
+			return (compute_rand<L, uint16, P>::call() % (Max + static_cast<uint16>(1) - Min)) + Min;
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, int32, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, int32, P> call(vec<L, int32, P> const& Min, vec<L, int32, P> const& Max)
+		{
+			return (vec<L, int32, P>(compute_rand<L, uint32, P>::call() % vec<L, uint32, P>(Max + static_cast<int32>(1) - Min))) + Min;
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, uint32, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, uint32, P> call(vec<L, uint32, P> const& Min, vec<L, uint32, P> const& Max)
+		{
+			return (compute_rand<L, uint32, P>::call() % (Max + static_cast<uint32>(1) - Min)) + Min;
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, int64, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, int64, P> call(vec<L, int64, P> const& Min, vec<L, int64, P> const& Max)
+		{
+			return (vec<L, int64, P>(compute_rand<L, uint64, P>::call() % vec<L, uint64, P>(Max + static_cast<int64>(1) - Min))) + Min;
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, uint64, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, uint64, P> call(vec<L, uint64, P> const& Min, vec<L, uint64, P> const& Max)
+		{
+			return (compute_rand<L, uint64, P>::call() % (Max + static_cast<uint64>(1) - Min)) + Min;
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, float, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, float, P> call(vec<L, float, P> const& Min, vec<L, float, P> const& Max)
+		{
+			return vec<L, float, P>(compute_rand<L, uint32, P>::call()) / static_cast<float>(std::numeric_limits<uint32>::max()) * (Max - Min) + Min;
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, double, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, double, P> call(vec<L, double, P> const& Min, vec<L, double, P> const& Max)
+		{
+			return vec<L, double, P>(compute_rand<L, uint64, P>::call()) / static_cast<double>(std::numeric_limits<uint64>::max()) * (Max - Min) + Min;
+		}
+	};
+
+	template<length_t L, qualifier P>
+	struct compute_linearRand<L, long double, P>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, long double, P> call(vec<L, long double, P> const& Min, vec<L, long double, P> const& Max)
+		{
+			return vec<L, long double, P>(compute_rand<L, uint64, P>::call()) / static_cast<long double>(std::numeric_limits<uint64>::max()) * (Max - Min) + Min;
+		}
+	};
+}//namespace detail
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType linearRand(genType Min, genType Max)
+	{
+		return detail::compute_linearRand<1, genType, highp>::call(
+			vec<1, genType, highp>(Min),
+			vec<1, genType, highp>(Max)).x;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> linearRand(vec<L, T, Q> const& Min, vec<L, T, Q> const& Max)
+	{
+		return detail::compute_linearRand<L, T, Q>::call(Min, Max);
+	}
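+
+	// Note (illustrative): gaussRand below is a variant of Marsaglia's polar
+	// method: it rejects pairs of linearRand samples outside the unit disk,
+	// accepting a fraction pi/4 (~78.5%) per iteration, so the do/while loop
+	// terminates quickly with high probability.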
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType gaussRand(genType Mean, genType Deviation)
+	{
+		genType w, x1, x2;
+
+		do
+		{
+			x1 = linearRand(genType(-1), genType(1));
+			x2 = linearRand(genType(-1), genType(1));
+
+			w = x1 * x1 + x2 * x2;
+		} while(w > genType(1));
+
+		return static_cast<genType>(x2 * Deviation * Deviation * sqrt((genType(-2) * log(w)) / w) + Mean);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> gaussRand(vec<L, T, Q> const& Mean, vec<L, T, Q> const& Deviation)
+	{
+		return detail::functor2<vec, L, T, Q>::call(gaussRand, Mean, Deviation);
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER vec<2, T, defaultp> diskRand(T Radius)
+	{
+		assert(Radius > static_cast<T>(0));
+
+		vec<2, T, defaultp> Result(T(0));
+		T LenRadius(T(0));
+
+		do
+		{
+			Result = linearRand(
+				vec<2, T, defaultp>(-Radius),
+				vec<2, T, defaultp>(Radius));
+			LenRadius = length(Result);
+		}
+		while(LenRadius > Radius);
+
+		return Result;
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER vec<3, T, defaultp> ballRand(T Radius)
+	{
+		assert(Radius > static_cast<T>(0));
+
+		vec<3, T, defaultp> Result(T(0));
+		T LenRadius(T(0));
+
+		do
+		{
+			Result = linearRand(
+				vec<3, T, defaultp>(-Radius),
+				vec<3, T, defaultp>(Radius));
+			LenRadius = length(Result);
+		}
+		while(LenRadius > Radius);
+
+		return Result;
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER vec<2, T, defaultp> circularRand(T Radius)
+	{
+		assert(Radius > static_cast<T>(0));
+
+		T a = linearRand(T(0), static_cast<T>(6.283185307179586476925286766559));
+		return vec<2, T, defaultp>(glm::cos(a), glm::sin(a)) * Radius;
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER vec<3, T, defaultp> sphericalRand(T Radius)
+	{
+		assert(Radius > static_cast<T>(0));
+
+		T theta = linearRand(T(0), T(6.283185307179586476925286766559f));
+		T phi = std::acos(linearRand(T(-1.0f), T(1.0f)));
+
+		T x = std::sin(phi) * std::cos(theta);
+		T y = std::sin(phi) * std::sin(theta);
+		T z = std::cos(phi);
+
+		return vec<3, T, defaultp>(x, y, z) * Radius;
+	}
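+
+	// Note (illustrative): sphericalRand avoids clustering at the poles
+	// because phi = acos(u) with u uniform in [-1, 1] makes z = cos(phi)
+	// uniform, the area-preserving parameterization of the sphere.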
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/reciprocal.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/reciprocal.hpp
new file mode 100644
index 000000000000..4d0fc91ca657
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/reciprocal.hpp
@@ -0,0 +1,24 @@
+/// @ref gtc_reciprocal
+/// @file glm/gtc/reciprocal.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_reciprocal GLM_GTC_reciprocal
+/// @ingroup gtc
+///
+/// Include <glm/gtc/reciprocal.hpp> to use the features of this extension.
+///
+/// Define secant, cosecant and cotangent functions.
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_reciprocal extension included")
+#endif
+
+#include "../ext/scalar_reciprocal.hpp"
+#include "../ext/vector_reciprocal.hpp"
+
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/round.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/round.hpp
new file mode 100644
index 000000000000..56edbbca30b5
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/round.hpp
@@ -0,0 +1,160 @@
+/// @ref gtc_round
+/// @file glm/gtc/round.hpp
+///
+/// @see core (dependence)
+/// @see gtc_round (dependence)
+///
+/// @defgroup gtc_round GLM_GTC_round
+/// @ingroup gtc
+///
+/// Include <glm/gtc/round.hpp> to use the features of this extension.
+///
+/// Rounding values to specific boundaries
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../detail/_vectorize.hpp"
+#include "../vector_relational.hpp"
+#include "../common.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_round extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtc_round
+	/// @{
+
+	/// Return the power of two number whose value is just higher than the input value,
+	/// round up to a power of two.
+	///
+	/// @see gtc_round
+	template<typename genIUType>
+	GLM_FUNC_DECL genIUType ceilPowerOfTwo(genIUType v);
+
+	/// Return the power of two number whose value is just higher than the input value,
+	/// round up to a power of two.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see gtc_round
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> ceilPowerOfTwo(vec<L, T, Q> const& v);
+
+	/// Return the power of two number whose value is just lower than the input value,
+	/// round down to a power of two.
+	///
+	/// @see gtc_round
+	template<typename genIUType>
+	GLM_FUNC_DECL genIUType floorPowerOfTwo(genIUType v);
+
+	/// Return the power of two number whose value is just lower than the input value,
+	/// round down to a power of two.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see gtc_round
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> floorPowerOfTwo(vec<L, T, Q> const& v);
+
+	/// Return the power of two number whose value is the closest to the input value.
+	///
+	/// @see gtc_round
+	template<typename genIUType>
+	GLM_FUNC_DECL genIUType roundPowerOfTwo(genIUType v);
+
+	/// Return the power of two number whose value is the closest to the input value.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see gtc_round
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> roundPowerOfTwo(vec<L, T, Q> const& v);
+
+	/// Higher multiple number of Source.
+	///
+	/// @tparam genType Floating-point or integer scalar or vector types.
+	///
+	/// @param v Source value to which is applied the function
+	/// @param Multiple Must be a null or positive value
+	///
+	/// @see gtc_round
+	template<typename genType>
+	GLM_FUNC_DECL genType ceilMultiple(genType v, genType Multiple);
+
+	/// Higher multiple number of Source.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @param v Source values to which is applied the function
+	/// @param Multiple Must be a null or positive value
+	///
+	/// @see gtc_round
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> ceilMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
+
+	/// Lower multiple number of Source.
+	///
+	/// @tparam genType Floating-point or integer scalar or vector types.
+	///
+	/// @param v Source value to which is applied the function
+	/// @param Multiple Must be a null or positive value
+	///
+	/// @see gtc_round
+	template<typename genType>
+	GLM_FUNC_DECL genType floorMultiple(genType v, genType Multiple);
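+
+	// Illustrative values (editorial sketch, assuming the definitions in
+	// round.inl): for positive integers,
+	//   ceilMultiple(13, 4)  == 16  // smallest multiple of 4 at or above 13
+	//   floorMultiple(13, 4) == 12  // largest multiple of 4 at or below 13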
+
+	/// Lower multiple number of Source.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @param v Source values to which is applied the function
+	/// @param Multiple Must be a null or positive value
+	///
+	/// @see gtc_round
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> floorMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
+
+	/// Nearest multiple number of Source.
+	///
+	/// @tparam genType Floating-point or integer scalar or vector types.
+	///
+	/// @param v Source value to which is applied the function
+	/// @param Multiple Must be a null or positive value
+	///
+	/// @see gtc_round
+	template<typename genType>
+	GLM_FUNC_DECL genType roundMultiple(genType v, genType Multiple);
+
+	/// Nearest multiple number of Source.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @param v Source values to which is applied the function
+	/// @param Multiple Must be a null or positive value
+	///
+	/// @see gtc_round
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> roundMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
+
+	/// @}
+} //namespace glm
+
+#include "round.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/round.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/round.inl
new file mode 100644
index 000000000000..48411e41dc34
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/round.inl
@@ -0,0 +1,155 @@
+/// @ref gtc_round
+
+#include "../integer.hpp"
+#include "../ext/vector_integer.hpp"
+
+namespace glm{
+namespace detail
+{
+	template<bool is_float, bool is_signed>
+	struct compute_roundMultiple {};
+
+	template<>
+	struct compute_roundMultiple<true, true>
+	{
+		template<typename genType>
+		GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+		{
+			if (Source >= genType(0))
+				return Source - std::fmod(Source, Multiple);
+			else
+			{
+				genType Tmp = Source + genType(1);
+				return Tmp - std::fmod(Tmp, Multiple) - Multiple;
+			}
+		}
+	};
+
+	template<>
+	struct compute_roundMultiple<false, false>
+	{
+		template<typename genType>
+		GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+		{
+			if (Source >= genType(0))
+				return Source - Source % Multiple;
+			else
+			{
+				genType Tmp = Source + genType(1);
+				return Tmp - Tmp % Multiple - Multiple;
+			}
+		}
+	};
+
+	template<>
+	struct compute_roundMultiple<false, true>
+	{
+		template<typename genType>
+		GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
+		{
+			if (Source >= genType(0))
+				return Source - Source % Multiple;
+			else
+			{
+				genType Tmp = Source + genType(1);
+				return Tmp - Tmp % Multiple - Multiple;
+			}
+		}
+	};
+}//namespace detail
+
+	//////////////////
+	// ceilPowerOfTwo
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType ceilPowerOfTwo(genType value)
+	{
+		return detail::compute_ceilPowerOfTwo<1, genType, defaultp, std::numeric_limits<genType>::is_signed>::call(vec<1, genType, defaultp>(value)).x;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> ceilPowerOfTwo(vec<L, T, Q> const& v)
+	{
+		return detail::compute_ceilPowerOfTwo<L, T, Q, std::numeric_limits<T>::is_signed>::call(v);
+	}
+
+	///////////////////
+	// floorPowerOfTwo
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType floorPowerOfTwo(genType value)
+	{
+		return isPowerOfTwo(value) ? value : static_cast<genType>(1) << findMSB(value);
+	}
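+
+	// For example (illustrative): floorPowerOfTwo(9) == 8, since 9 is not a
+	// power of two and 1 << findMSB(9) == 1 << 3; powers of two are returned
+	// unchanged.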
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> floorPowerOfTwo(vec<L, T, Q> const& v)
+	{
+		return detail::functor1<vec, L, T, T, Q>::call(floorPowerOfTwo, v);
+	}
+
+	///////////////////
+	// roundPowerOfTwo
+
+	template<typename genIUType>
+	GLM_FUNC_QUALIFIER genIUType roundPowerOfTwo(genIUType value)
+	{
+		if(isPowerOfTwo(value))
+			return value;
+
+		genIUType const prev = static_cast<genIUType>(1) << findMSB(value);
+		genIUType const next = prev << static_cast<genIUType>(1);
+		return (next - value) < (value - prev) ? next : prev;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> roundPowerOfTwo(vec<L, T, Q> const& v)
+	{
+		return detail::functor1<vec, L, T, T, Q>::call(roundPowerOfTwo, v);
+	}
+
+	//////////////////////
+	// ceilMultiple
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType ceilMultiple(genType Source, genType Multiple)
+	{
+		return detail::compute_ceilMultiple<std::numeric_limits<genType>::is_iec559, std::numeric_limits<genType>::is_signed>::call(Source, Multiple);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> ceilMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
+	{
+		return detail::functor2<vec, L, T, Q>::call(ceilMultiple, Source, Multiple);
+	}
+
+	//////////////////////
+	// floorMultiple
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType floorMultiple(genType Source, genType Multiple)
+	{
+		return detail::compute_floorMultiple<std::numeric_limits<genType>::is_iec559, std::numeric_limits<genType>::is_signed>::call(Source, Multiple);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> floorMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
+	{
+		return detail::functor2<vec, L, T, Q>::call(floorMultiple, Source, Multiple);
+	}
+
+	//////////////////////
+	// roundMultiple
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType roundMultiple(genType Source, genType Multiple)
+	{
+		return detail::compute_roundMultiple<std::numeric_limits<genType>::is_iec559, std::numeric_limits<genType>::is_signed>::call(Source, Multiple);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> roundMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
+	{
+		return detail::functor2<vec, L, T, Q>::call(roundMultiple, Source, Multiple);
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/type_aligned.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/type_aligned.hpp
new file mode 100644
index 000000000000..5403abf67525
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/type_aligned.hpp
@@ -0,0 +1,1315 @@
+/// @ref gtc_type_aligned
+/// @file glm/gtc/type_aligned.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_type_aligned GLM_GTC_type_aligned
+/// @ingroup gtc
+///
+/// Include <glm/gtc/type_aligned.hpp> to use the features of this extension.
+///
+/// Aligned types allowing SIMD optimizations of vectors and matrices types
+
+#pragma once
+
+#if (GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE)
+#	error "GLM: Aligned gentypes require to enable C++ language extensions. Define GLM_FORCE_ALIGNED_GENTYPES before including GLM headers to use aligned types."
+#endif
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_type_aligned extension included")
+#endif
+
+#include "../mat4x4.hpp"
+#include "../mat4x3.hpp"
+#include "../mat4x2.hpp"
+#include "../mat3x4.hpp"
+#include "../mat3x3.hpp"
+#include "../mat3x2.hpp"
+#include "../mat2x4.hpp"
+#include "../mat2x3.hpp"
+#include "../mat2x2.hpp"
+#include "../gtc/vec1.hpp"
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+
+namespace glm
+{
+	/// @addtogroup gtc_type_aligned
+	/// @{
+
+	// -- *vec1 --
+
+	/// 1 component vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
+ typedef vec<1, float, aligned_highp> aligned_highp_vec1; + + /// 1 component vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, float, aligned_mediump> aligned_mediump_vec1; + + /// 1 component vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, float, aligned_lowp> aligned_lowp_vec1; + + /// 1 component vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<1, double, aligned_highp> aligned_highp_dvec1; + + /// 1 component vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, double, aligned_mediump> aligned_mediump_dvec1; + + /// 1 component vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, double, aligned_lowp> aligned_lowp_dvec1; + + /// 1 component vector aligned in memory of signed integer numbers. + typedef vec<1, int, aligned_highp> aligned_highp_ivec1; + + /// 1 component vector aligned in memory of signed integer numbers. + typedef vec<1, int, aligned_mediump> aligned_mediump_ivec1; + + /// 1 component vector aligned in memory of signed integer numbers. + typedef vec<1, int, aligned_lowp> aligned_lowp_ivec1; + + /// 1 component vector aligned in memory of unsigned integer numbers. + typedef vec<1, uint, aligned_highp> aligned_highp_uvec1; + + /// 1 component vector aligned in memory of unsigned integer numbers. + typedef vec<1, uint, aligned_mediump> aligned_mediump_uvec1; + + /// 1 component vector aligned in memory of unsigned integer numbers. + typedef vec<1, uint, aligned_lowp> aligned_lowp_uvec1; + + /// 1 component vector aligned in memory of bool values. + typedef vec<1, bool, aligned_highp> aligned_highp_bvec1; + + /// 1 component vector aligned in memory of bool values. + typedef vec<1, bool, aligned_mediump> aligned_mediump_bvec1; + + /// 1 component vector aligned in memory of bool values. + typedef vec<1, bool, aligned_lowp> aligned_lowp_bvec1; + + /// 1 component vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<1, float, packed_highp> packed_highp_vec1; + + /// 1 component vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, float, packed_mediump> packed_mediump_vec1; + + /// 1 component vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, float, packed_lowp> packed_lowp_vec1; + + /// 1 component vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<1, double, packed_highp> packed_highp_dvec1; + + /// 1 component vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, double, packed_mediump> packed_mediump_dvec1; + + /// 1 component vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, double, packed_lowp> packed_lowp_dvec1; + + /// 1 component vector tightly packed in memory of signed integer numbers. 
+ typedef vec<1, int, packed_highp> packed_highp_ivec1; + + /// 1 component vector tightly packed in memory of signed integer numbers. + typedef vec<1, int, packed_mediump> packed_mediump_ivec1; + + /// 1 component vector tightly packed in memory of signed integer numbers. + typedef vec<1, int, packed_lowp> packed_lowp_ivec1; + + /// 1 component vector tightly packed in memory of unsigned integer numbers. + typedef vec<1, uint, packed_highp> packed_highp_uvec1; + + /// 1 component vector tightly packed in memory of unsigned integer numbers. + typedef vec<1, uint, packed_mediump> packed_mediump_uvec1; + + /// 1 component vector tightly packed in memory of unsigned integer numbers. + typedef vec<1, uint, packed_lowp> packed_lowp_uvec1; + + /// 1 component vector tightly packed in memory of bool values. + typedef vec<1, bool, packed_highp> packed_highp_bvec1; + + /// 1 component vector tightly packed in memory of bool values. + typedef vec<1, bool, packed_mediump> packed_mediump_bvec1; + + /// 1 component vector tightly packed in memory of bool values. + typedef vec<1, bool, packed_lowp> packed_lowp_bvec1; + + // -- *vec2 -- + + /// 2 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<2, float, aligned_highp> aligned_highp_vec2; + + /// 2 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<2, float, aligned_mediump> aligned_mediump_vec2; + + /// 2 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<2, float, aligned_lowp> aligned_lowp_vec2; + + /// 2 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<2, double, aligned_highp> aligned_highp_dvec2; + + /// 2 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<2, double, aligned_mediump> aligned_mediump_dvec2; + + /// 2 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<2, double, aligned_lowp> aligned_lowp_dvec2; + + /// 2 components vector aligned in memory of signed integer numbers. + typedef vec<2, int, aligned_highp> aligned_highp_ivec2; + + /// 2 components vector aligned in memory of signed integer numbers. + typedef vec<2, int, aligned_mediump> aligned_mediump_ivec2; + + /// 2 components vector aligned in memory of signed integer numbers. + typedef vec<2, int, aligned_lowp> aligned_lowp_ivec2; + + /// 2 components vector aligned in memory of unsigned integer numbers. + typedef vec<2, uint, aligned_highp> aligned_highp_uvec2; + + /// 2 components vector aligned in memory of unsigned integer numbers. + typedef vec<2, uint, aligned_mediump> aligned_mediump_uvec2; + + /// 2 components vector aligned in memory of unsigned integer numbers. + typedef vec<2, uint, aligned_lowp> aligned_lowp_uvec2; + + /// 2 components vector aligned in memory of bool values. + typedef vec<2, bool, aligned_highp> aligned_highp_bvec2; + + /// 2 components vector aligned in memory of bool values. + typedef vec<2, bool, aligned_mediump> aligned_mediump_bvec2; + + /// 2 components vector aligned in memory of bool values. 
+ typedef vec<2, bool, aligned_lowp> aligned_lowp_bvec2; + + /// 2 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<2, float, packed_highp> packed_highp_vec2; + + /// 2 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<2, float, packed_mediump> packed_mediump_vec2; + + /// 2 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<2, float, packed_lowp> packed_lowp_vec2; + + /// 2 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<2, double, packed_highp> packed_highp_dvec2; + + /// 2 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<2, double, packed_mediump> packed_mediump_dvec2; + + /// 2 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<2, double, packed_lowp> packed_lowp_dvec2; + + /// 2 components vector tightly packed in memory of signed integer numbers. + typedef vec<2, int, packed_highp> packed_highp_ivec2; + + /// 2 components vector tightly packed in memory of signed integer numbers. + typedef vec<2, int, packed_mediump> packed_mediump_ivec2; + + /// 2 components vector tightly packed in memory of signed integer numbers. + typedef vec<2, int, packed_lowp> packed_lowp_ivec2; + + /// 2 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<2, uint, packed_highp> packed_highp_uvec2; + + /// 2 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<2, uint, packed_mediump> packed_mediump_uvec2; + + /// 2 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<2, uint, packed_lowp> packed_lowp_uvec2; + + /// 2 components vector tightly packed in memory of bool values. + typedef vec<2, bool, packed_highp> packed_highp_bvec2; + + /// 2 components vector tightly packed in memory of bool values. + typedef vec<2, bool, packed_mediump> packed_mediump_bvec2; + + /// 2 components vector tightly packed in memory of bool values. + typedef vec<2, bool, packed_lowp> packed_lowp_bvec2; + + // -- *vec3 -- + + /// 3 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<3, float, aligned_highp> aligned_highp_vec3; + + /// 3 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<3, float, aligned_mediump> aligned_mediump_vec3; + + /// 3 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<3, float, aligned_lowp> aligned_lowp_vec3; + + /// 3 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<3, double, aligned_highp> aligned_highp_dvec3; + + /// 3 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ typedef vec<3, double, aligned_mediump> aligned_mediump_dvec3; + + /// 3 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<3, double, aligned_lowp> aligned_lowp_dvec3; + + /// 3 components vector aligned in memory of signed integer numbers. + typedef vec<3, int, aligned_highp> aligned_highp_ivec3; + + /// 3 components vector aligned in memory of signed integer numbers. + typedef vec<3, int, aligned_mediump> aligned_mediump_ivec3; + + /// 3 components vector aligned in memory of signed integer numbers. + typedef vec<3, int, aligned_lowp> aligned_lowp_ivec3; + + /// 3 components vector aligned in memory of unsigned integer numbers. + typedef vec<3, uint, aligned_highp> aligned_highp_uvec3; + + /// 3 components vector aligned in memory of unsigned integer numbers. + typedef vec<3, uint, aligned_mediump> aligned_mediump_uvec3; + + /// 3 components vector aligned in memory of unsigned integer numbers. + typedef vec<3, uint, aligned_lowp> aligned_lowp_uvec3; + + /// 3 components vector aligned in memory of bool values. + typedef vec<3, bool, aligned_highp> aligned_highp_bvec3; + + /// 3 components vector aligned in memory of bool values. + typedef vec<3, bool, aligned_mediump> aligned_mediump_bvec3; + + /// 3 components vector aligned in memory of bool values. + typedef vec<3, bool, aligned_lowp> aligned_lowp_bvec3; + + /// 3 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<3, float, packed_highp> packed_highp_vec3; + + /// 3 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<3, float, packed_mediump> packed_mediump_vec3; + + /// 3 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<3, float, packed_lowp> packed_lowp_vec3; + + /// 3 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<3, double, packed_highp> packed_highp_dvec3; + + /// 3 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<3, double, packed_mediump> packed_mediump_dvec3; + + /// 3 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<3, double, packed_lowp> packed_lowp_dvec3; + + /// 3 components vector tightly packed in memory of signed integer numbers. + typedef vec<3, int, packed_highp> packed_highp_ivec3; + + /// 3 components vector tightly packed in memory of signed integer numbers. + typedef vec<3, int, packed_mediump> packed_mediump_ivec3; + + /// 3 components vector tightly packed in memory of signed integer numbers. + typedef vec<3, int, packed_lowp> packed_lowp_ivec3; + + /// 3 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<3, uint, packed_highp> packed_highp_uvec3; + + /// 3 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<3, uint, packed_mediump> packed_mediump_uvec3; + + /// 3 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<3, uint, packed_lowp> packed_lowp_uvec3; + + /// 3 components vector tightly packed in memory of bool values. 
+ typedef vec<3, bool, packed_highp> packed_highp_bvec3; + + /// 3 components vector tightly packed in memory of bool values. + typedef vec<3, bool, packed_mediump> packed_mediump_bvec3; + + /// 3 components vector tightly packed in memory of bool values. + typedef vec<3, bool, packed_lowp> packed_lowp_bvec3; + + // -- *vec4 -- + + /// 4 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<4, float, aligned_highp> aligned_highp_vec4; + + /// 4 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<4, float, aligned_mediump> aligned_mediump_vec4; + + /// 4 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<4, float, aligned_lowp> aligned_lowp_vec4; + + /// 4 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<4, double, aligned_highp> aligned_highp_dvec4; + + /// 4 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<4, double, aligned_mediump> aligned_mediump_dvec4; + + /// 4 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<4, double, aligned_lowp> aligned_lowp_dvec4; + + /// 4 components vector aligned in memory of signed integer numbers. + typedef vec<4, int, aligned_highp> aligned_highp_ivec4; + + /// 4 components vector aligned in memory of signed integer numbers. + typedef vec<4, int, aligned_mediump> aligned_mediump_ivec4; + + /// 4 components vector aligned in memory of signed integer numbers. + typedef vec<4, int, aligned_lowp> aligned_lowp_ivec4; + + /// 4 components vector aligned in memory of unsigned integer numbers. + typedef vec<4, uint, aligned_highp> aligned_highp_uvec4; + + /// 4 components vector aligned in memory of unsigned integer numbers. + typedef vec<4, uint, aligned_mediump> aligned_mediump_uvec4; + + /// 4 components vector aligned in memory of unsigned integer numbers. + typedef vec<4, uint, aligned_lowp> aligned_lowp_uvec4; + + /// 4 components vector aligned in memory of bool values. + typedef vec<4, bool, aligned_highp> aligned_highp_bvec4; + + /// 4 components vector aligned in memory of bool values. + typedef vec<4, bool, aligned_mediump> aligned_mediump_bvec4; + + /// 4 components vector aligned in memory of bool values. + typedef vec<4, bool, aligned_lowp> aligned_lowp_bvec4; + + /// 4 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<4, float, packed_highp> packed_highp_vec4; + + /// 4 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<4, float, packed_mediump> packed_mediump_vec4; + + /// 4 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<4, float, packed_lowp> packed_lowp_vec4; + + /// 4 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+ typedef vec<4, double, packed_highp> packed_highp_dvec4; + + /// 4 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<4, double, packed_mediump> packed_mediump_dvec4; + + /// 4 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<4, double, packed_lowp> packed_lowp_dvec4; + + /// 4 components vector tightly packed in memory of signed integer numbers. + typedef vec<4, int, packed_highp> packed_highp_ivec4; + + /// 4 components vector tightly packed in memory of signed integer numbers. + typedef vec<4, int, packed_mediump> packed_mediump_ivec4; + + /// 4 components vector tightly packed in memory of signed integer numbers. + typedef vec<4, int, packed_lowp> packed_lowp_ivec4; + + /// 4 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<4, uint, packed_highp> packed_highp_uvec4; + + /// 4 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<4, uint, packed_mediump> packed_mediump_uvec4; + + /// 4 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<4, uint, packed_lowp> packed_lowp_uvec4; + + /// 4 components vector tightly packed in memory of bool values. + typedef vec<4, bool, packed_highp> packed_highp_bvec4; + + /// 4 components vector tightly packed in memory of bool values. + typedef vec<4, bool, packed_mediump> packed_mediump_bvec4; + + /// 4 components vector tightly packed in memory of bool values. + typedef vec<4, bool, packed_lowp> packed_lowp_bvec4; + + // -- *mat2 -- + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_highp> aligned_highp_mat2; + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_mediump> aligned_mediump_mat2; + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_lowp> aligned_lowp_mat2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_highp> aligned_highp_dmat2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_mediump> aligned_mediump_dmat2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_lowp> aligned_lowp_dmat2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, float, packed_highp> packed_highp_mat2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, float, packed_mediump> packed_mediump_mat2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. 
+ typedef mat<2, 2, float, packed_lowp> packed_lowp_mat2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_highp> packed_highp_dmat2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_mediump> packed_mediump_dmat2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_lowp> packed_lowp_dmat2; + + // -- *mat3 -- + + /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, float, aligned_highp> aligned_highp_mat3; + + /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, float, aligned_mediump> aligned_mediump_mat3; + + /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, float, aligned_lowp> aligned_lowp_mat3; + + /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, double, aligned_highp> aligned_highp_dmat3; + + /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, double, aligned_mediump> aligned_mediump_dmat3; + + /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, double, aligned_lowp> aligned_lowp_dmat3; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, float, packed_highp> packed_highp_mat3; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, float, packed_mediump> packed_mediump_mat3; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, float, packed_lowp> packed_lowp_mat3; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, double, packed_highp> packed_highp_dmat3; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, double, packed_mediump> packed_mediump_dmat3; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, double, packed_lowp> packed_lowp_dmat3; + + // -- *mat4 -- + + /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, float, aligned_highp> aligned_highp_mat4; + + /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ typedef mat<4, 4, float, aligned_mediump> aligned_mediump_mat4; + + /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 4, float, aligned_lowp> aligned_lowp_mat4; + + /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, double, aligned_highp> aligned_highp_dmat4; + + /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, double, aligned_mediump> aligned_mediump_dmat4; + + /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 4, double, aligned_lowp> aligned_lowp_dmat4; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, float, packed_highp> packed_highp_mat4; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, float, packed_mediump> packed_mediump_mat4; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 4, float, packed_lowp> packed_lowp_mat4; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, double, packed_highp> packed_highp_dmat4; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, double, packed_mediump> packed_mediump_dmat4; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 4, double, packed_lowp> packed_lowp_dmat4; + + // -- *mat2x2 -- + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_highp> aligned_highp_mat2x2; + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_mediump> aligned_mediump_mat2x2; + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_lowp> aligned_lowp_mat2x2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_highp> aligned_highp_dmat2x2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_mediump> aligned_mediump_dmat2x2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_lowp> aligned_lowp_dmat2x2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+ typedef mat<2, 2, float, packed_highp> packed_highp_mat2x2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, float, packed_mediump> packed_mediump_mat2x2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, float, packed_lowp> packed_lowp_mat2x2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_highp> packed_highp_dmat2x2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_mediump> packed_mediump_dmat2x2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_lowp> packed_lowp_dmat2x2; + + // -- *mat2x3 -- + + /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 3, float, aligned_highp> aligned_highp_mat2x3; + + /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 3, float, aligned_mediump> aligned_mediump_mat2x3; + + /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 3, float, aligned_lowp> aligned_lowp_mat2x3; + + /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 3, double, aligned_highp> aligned_highp_dmat2x3; + + /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 3, double, aligned_mediump> aligned_mediump_dmat2x3; + + /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 3, double, aligned_lowp> aligned_lowp_dmat2x3; + + /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 3, float, packed_highp> packed_highp_mat2x3; + + /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 3, float, packed_mediump> packed_mediump_mat2x3; + + /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 3, float, packed_lowp> packed_lowp_mat2x3; + + /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 3, double, packed_highp> packed_highp_dmat2x3; + + /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 3, double, packed_mediump> packed_mediump_dmat2x3; + + /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. 
+ typedef mat<2, 3, double, packed_lowp> packed_lowp_dmat2x3; + + // -- *mat2x4 -- + + /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 4, float, aligned_highp> aligned_highp_mat2x4; + + /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 4, float, aligned_mediump> aligned_mediump_mat2x4; + + /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 4, float, aligned_lowp> aligned_lowp_mat2x4; + + /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 4, double, aligned_highp> aligned_highp_dmat2x4; + + /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 4, double, aligned_mediump> aligned_mediump_dmat2x4; + + /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 4, double, aligned_lowp> aligned_lowp_dmat2x4; + + /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 4, float, packed_highp> packed_highp_mat2x4; + + /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 4, float, packed_mediump> packed_mediump_mat2x4; + + /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 4, float, packed_lowp> packed_lowp_mat2x4; + + /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 4, double, packed_highp> packed_highp_dmat2x4; + + /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 4, double, packed_mediump> packed_mediump_dmat2x4; + + /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 4, double, packed_lowp> packed_lowp_dmat2x4; + + // -- *mat3x2 -- + + /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 2, float, aligned_highp> aligned_highp_mat3x2; + + /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 2, float, aligned_mediump> aligned_mediump_mat3x2; + + /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 2, float, aligned_lowp> aligned_lowp_mat3x2; + + /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 2, double, aligned_highp> aligned_highp_dmat3x2; + + /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ typedef mat<3, 2, double, aligned_mediump> aligned_mediump_dmat3x2;
+
+ /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, double, aligned_lowp> aligned_lowp_dmat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, float, packed_highp> packed_highp_mat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, float, packed_mediump> packed_mediump_mat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, float, packed_lowp> packed_lowp_mat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, double, packed_highp> packed_highp_dmat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, double, packed_mediump> packed_mediump_dmat3x2;
+
+ /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 2, double, packed_lowp> packed_lowp_dmat3x2;
+
+ // -- *mat3x3 --
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, aligned_highp> aligned_highp_mat3x3;
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, aligned_mediump> aligned_mediump_mat3x3;
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, aligned_lowp> aligned_lowp_mat3x3;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, aligned_highp> aligned_highp_dmat3x3;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, aligned_mediump> aligned_mediump_dmat3x3;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, aligned_lowp> aligned_lowp_dmat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, packed_highp> packed_highp_mat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, packed_mediump> packed_mediump_mat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, float, packed_lowp> packed_lowp_mat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, packed_highp> packed_highp_dmat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, packed_mediump> packed_mediump_dmat3x3;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 3, double, packed_lowp> packed_lowp_dmat3x3;
+
+ // -- *mat3x4 --
+
+ /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, aligned_highp> aligned_highp_mat3x4;
+
+ /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, aligned_mediump> aligned_mediump_mat3x4;
+
+ /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, aligned_lowp> aligned_lowp_mat3x4;
+
+ /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, aligned_highp> aligned_highp_dmat3x4;
+
+ /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, aligned_mediump> aligned_mediump_dmat3x4;
+
+ /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, aligned_lowp> aligned_lowp_dmat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, packed_highp> packed_highp_mat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, packed_mediump> packed_mediump_mat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, float, packed_lowp> packed_lowp_mat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, packed_highp> packed_highp_dmat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, packed_mediump> packed_mediump_dmat3x4;
+
+ /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<3, 4, double, packed_lowp> packed_lowp_dmat3x4;
+
+ // -- *mat4x2 --
+
+ /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, aligned_highp> aligned_highp_mat4x2;
+
+ /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, aligned_mediump> aligned_mediump_mat4x2;
+
+ /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, aligned_lowp> aligned_lowp_mat4x2;
+
+ /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, aligned_highp> aligned_highp_dmat4x2;
+
+ /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, aligned_mediump> aligned_mediump_dmat4x2;
+
+ /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, aligned_lowp> aligned_lowp_dmat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, packed_highp> packed_highp_mat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, packed_mediump> packed_mediump_mat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, float, packed_lowp> packed_lowp_mat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, packed_highp> packed_highp_dmat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, packed_mediump> packed_mediump_dmat4x2;
+
+ /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 2, double, packed_lowp> packed_lowp_dmat4x2;
+
+ // -- *mat4x3 --
+
+ /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, aligned_highp> aligned_highp_mat4x3;
+
+ /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, aligned_mediump> aligned_mediump_mat4x3;
+
+ /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, aligned_lowp> aligned_lowp_mat4x3;
+
+ /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, aligned_highp> aligned_highp_dmat4x3;
+
+ /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, aligned_mediump> aligned_mediump_dmat4x3;
+
+ /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, aligned_lowp> aligned_lowp_dmat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, packed_highp> packed_highp_mat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, packed_mediump> packed_mediump_mat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, float, packed_lowp> packed_lowp_mat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, packed_highp> packed_highp_dmat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, packed_mediump> packed_mediump_dmat4x3;
+
+ /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 3, double, packed_lowp> packed_lowp_dmat4x3;
+
+ // -- *mat4x4 --
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, aligned_highp> aligned_highp_mat4x4;
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, aligned_mediump> aligned_mediump_mat4x4;
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, aligned_lowp> aligned_lowp_mat4x4;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, aligned_highp> aligned_highp_dmat4x4;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, aligned_mediump> aligned_mediump_dmat4x4;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, aligned_lowp> aligned_lowp_dmat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, packed_highp> packed_highp_mat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, packed_mediump> packed_mediump_mat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, float, packed_lowp> packed_lowp_mat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, packed_highp> packed_highp_dmat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, packed_mediump> packed_mediump_dmat4x4;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in terms of ULPs.
+ typedef mat<4, 4, double, packed_lowp> packed_lowp_dmat4x4; + + // -- default -- + +#if(defined(GLM_PRECISION_LOWP_FLOAT)) + typedef aligned_lowp_vec1 aligned_vec1; + typedef aligned_lowp_vec2 aligned_vec2; + typedef aligned_lowp_vec3 aligned_vec3; + typedef aligned_lowp_vec4 aligned_vec4; + typedef packed_lowp_vec1 packed_vec1; + typedef packed_lowp_vec2 packed_vec2; + typedef packed_lowp_vec3 packed_vec3; + typedef packed_lowp_vec4 packed_vec4; + + typedef aligned_lowp_mat2 aligned_mat2; + typedef aligned_lowp_mat3 aligned_mat3; + typedef aligned_lowp_mat4 aligned_mat4; + typedef packed_lowp_mat2 packed_mat2; + typedef packed_lowp_mat3 packed_mat3; + typedef packed_lowp_mat4 packed_mat4; + + typedef aligned_lowp_mat2x2 aligned_mat2x2; + typedef aligned_lowp_mat2x3 aligned_mat2x3; + typedef aligned_lowp_mat2x4 aligned_mat2x4; + typedef aligned_lowp_mat3x2 aligned_mat3x2; + typedef aligned_lowp_mat3x3 aligned_mat3x3; + typedef aligned_lowp_mat3x4 aligned_mat3x4; + typedef aligned_lowp_mat4x2 aligned_mat4x2; + typedef aligned_lowp_mat4x3 aligned_mat4x3; + typedef aligned_lowp_mat4x4 aligned_mat4x4; + typedef packed_lowp_mat2x2 packed_mat2x2; + typedef packed_lowp_mat2x3 packed_mat2x3; + typedef packed_lowp_mat2x4 packed_mat2x4; + typedef packed_lowp_mat3x2 packed_mat3x2; + typedef packed_lowp_mat3x3 packed_mat3x3; + typedef packed_lowp_mat3x4 packed_mat3x4; + typedef packed_lowp_mat4x2 packed_mat4x2; + typedef packed_lowp_mat4x3 packed_mat4x3; + typedef packed_lowp_mat4x4 packed_mat4x4; +#elif(defined(GLM_PRECISION_MEDIUMP_FLOAT)) + typedef aligned_mediump_vec1 aligned_vec1; + typedef aligned_mediump_vec2 aligned_vec2; + typedef aligned_mediump_vec3 aligned_vec3; + typedef aligned_mediump_vec4 aligned_vec4; + typedef packed_mediump_vec1 packed_vec1; + typedef packed_mediump_vec2 packed_vec2; + typedef packed_mediump_vec3 packed_vec3; + typedef packed_mediump_vec4 packed_vec4; + + typedef aligned_mediump_mat2 aligned_mat2; + typedef aligned_mediump_mat3 aligned_mat3; + typedef aligned_mediump_mat4 aligned_mat4; + typedef packed_mediump_mat2 packed_mat2; + typedef packed_mediump_mat3 packed_mat3; + typedef packed_mediump_mat4 packed_mat4; + + typedef aligned_mediump_mat2x2 aligned_mat2x2; + typedef aligned_mediump_mat2x3 aligned_mat2x3; + typedef aligned_mediump_mat2x4 aligned_mat2x4; + typedef aligned_mediump_mat3x2 aligned_mat3x2; + typedef aligned_mediump_mat3x3 aligned_mat3x3; + typedef aligned_mediump_mat3x4 aligned_mat3x4; + typedef aligned_mediump_mat4x2 aligned_mat4x2; + typedef aligned_mediump_mat4x3 aligned_mat4x3; + typedef aligned_mediump_mat4x4 aligned_mat4x4; + typedef packed_mediump_mat2x2 packed_mat2x2; + typedef packed_mediump_mat2x3 packed_mat2x3; + typedef packed_mediump_mat2x4 packed_mat2x4; + typedef packed_mediump_mat3x2 packed_mat3x2; + typedef packed_mediump_mat3x3 packed_mat3x3; + typedef packed_mediump_mat3x4 packed_mat3x4; + typedef packed_mediump_mat4x2 packed_mat4x2; + typedef packed_mediump_mat4x3 packed_mat4x3; + typedef packed_mediump_mat4x4 packed_mat4x4; +#else //defined(GLM_PRECISION_HIGHP_FLOAT) + /// 1 component vector aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_vec1 aligned_vec1; + + /// 2 components vector aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_vec2 aligned_vec2; + + /// 3 components vector aligned in memory of single-precision floating-point numbers. 
+ typedef aligned_highp_vec3 aligned_vec3;
+
+ /// 4 components vector aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_vec4 aligned_vec4;
+
+ /// 1 component vector tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_vec1 packed_vec1;
+
+ /// 2 components vector tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_vec2 packed_vec2;
+
+ /// 3 components vector tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_vec3 packed_vec3;
+
+ /// 4 components vector tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_vec4 packed_vec4;
+
+ /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat2 aligned_mat2;
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat3 aligned_mat3;
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat4 aligned_mat4;
+
+ /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat2 packed_mat2;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat3 packed_mat3;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat4 packed_mat4;
+
+ /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat2x2 aligned_mat2x2;
+
+ /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat2x3 aligned_mat2x3;
+
+ /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat2x4 aligned_mat2x4;
+
+ /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat3x2 aligned_mat3x2;
+
+ /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat3x3 aligned_mat3x3;
+
+ /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat3x4 aligned_mat3x4;
+
+ /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat4x2 aligned_mat4x2;
+
+ /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat4x3 aligned_mat4x3;
+
+ /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers.
+ typedef aligned_highp_mat4x4 aligned_mat4x4;
+
+ /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat2x2 packed_mat2x2;
+
+ /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat2x3 packed_mat2x3;
+
+ /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat2x4 packed_mat2x4;
+
+ /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat3x2 packed_mat3x2;
+
+ /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat3x3 packed_mat3x3;
+
+ /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat3x4 packed_mat3x4;
+
+ /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat4x2 packed_mat4x2;
+
+ /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat4x3 packed_mat4x3;
+
+ /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers.
+ typedef packed_highp_mat4x4 packed_mat4x4;
+#endif//GLM_PRECISION
+
+#if(defined(GLM_PRECISION_LOWP_DOUBLE))
+ typedef aligned_lowp_dvec1 aligned_dvec1;
+ typedef aligned_lowp_dvec2 aligned_dvec2;
+ typedef aligned_lowp_dvec3 aligned_dvec3;
+ typedef aligned_lowp_dvec4 aligned_dvec4;
+ typedef packed_lowp_dvec1 packed_dvec1;
+ typedef packed_lowp_dvec2 packed_dvec2;
+ typedef packed_lowp_dvec3 packed_dvec3;
+ typedef packed_lowp_dvec4 packed_dvec4;
+
+ typedef aligned_lowp_dmat2 aligned_dmat2;
+ typedef aligned_lowp_dmat3 aligned_dmat3;
+ typedef aligned_lowp_dmat4 aligned_dmat4;
+ typedef packed_lowp_dmat2 packed_dmat2;
+ typedef packed_lowp_dmat3 packed_dmat3;
+ typedef packed_lowp_dmat4 packed_dmat4;
+
+ typedef aligned_lowp_dmat2x2 aligned_dmat2x2;
+ typedef aligned_lowp_dmat2x3 aligned_dmat2x3;
+ typedef aligned_lowp_dmat2x4 aligned_dmat2x4;
+ typedef aligned_lowp_dmat3x2 aligned_dmat3x2;
+ typedef aligned_lowp_dmat3x3 aligned_dmat3x3;
+ typedef aligned_lowp_dmat3x4 aligned_dmat3x4;
+ typedef aligned_lowp_dmat4x2 aligned_dmat4x2;
+ typedef aligned_lowp_dmat4x3 aligned_dmat4x3;
+ typedef aligned_lowp_dmat4x4 aligned_dmat4x4;
+ typedef packed_lowp_dmat2x2 packed_dmat2x2;
+ typedef packed_lowp_dmat2x3 packed_dmat2x3;
+ typedef packed_lowp_dmat2x4 packed_dmat2x4;
+ typedef packed_lowp_dmat3x2 packed_dmat3x2;
+ typedef packed_lowp_dmat3x3 packed_dmat3x3;
+ typedef packed_lowp_dmat3x4 packed_dmat3x4;
+ typedef packed_lowp_dmat4x2 packed_dmat4x2;
+ typedef packed_lowp_dmat4x3 packed_dmat4x3;
+ typedef packed_lowp_dmat4x4 packed_dmat4x4;
+#elif(defined(GLM_PRECISION_MEDIUMP_DOUBLE))
+ typedef aligned_mediump_dvec1 aligned_dvec1;
+ typedef aligned_mediump_dvec2 aligned_dvec2;
+ typedef aligned_mediump_dvec3 aligned_dvec3;
+ typedef aligned_mediump_dvec4 aligned_dvec4;
+ typedef packed_mediump_dvec1 packed_dvec1;
+ typedef packed_mediump_dvec2 packed_dvec2;
+ typedef packed_mediump_dvec3 packed_dvec3;
+ typedef packed_mediump_dvec4 packed_dvec4;
+
+ typedef aligned_mediump_dmat2 aligned_dmat2;
+ typedef aligned_mediump_dmat3 aligned_dmat3;
+ typedef aligned_mediump_dmat4 aligned_dmat4;
+ typedef packed_mediump_dmat2 packed_dmat2;
+ typedef packed_mediump_dmat3 packed_dmat3;
+ typedef packed_mediump_dmat4 packed_dmat4;
+
+ typedef aligned_mediump_dmat2x2 aligned_dmat2x2;
+ typedef aligned_mediump_dmat2x3 aligned_dmat2x3;
+ typedef aligned_mediump_dmat2x4 aligned_dmat2x4;
+ typedef aligned_mediump_dmat3x2 aligned_dmat3x2;
+ typedef aligned_mediump_dmat3x3 aligned_dmat3x3;
+ typedef aligned_mediump_dmat3x4 aligned_dmat3x4;
+ typedef aligned_mediump_dmat4x2 aligned_dmat4x2;
+ typedef aligned_mediump_dmat4x3 aligned_dmat4x3;
+ typedef aligned_mediump_dmat4x4 aligned_dmat4x4;
+ typedef packed_mediump_dmat2x2 packed_dmat2x2;
+ typedef packed_mediump_dmat2x3 packed_dmat2x3;
+ typedef packed_mediump_dmat2x4 packed_dmat2x4;
+ typedef packed_mediump_dmat3x2 packed_dmat3x2;
+ typedef packed_mediump_dmat3x3 packed_dmat3x3;
+ typedef packed_mediump_dmat3x4 packed_dmat3x4;
+ typedef packed_mediump_dmat4x2 packed_dmat4x2;
+ typedef packed_mediump_dmat4x3 packed_dmat4x3;
+ typedef packed_mediump_dmat4x4 packed_dmat4x4;
+#else //defined(GLM_PRECISION_HIGHP_DOUBLE)
+ /// 1 component vector aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dvec1 aligned_dvec1;
+
+ /// 2 components vector aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dvec2 aligned_dvec2;
+
+ /// 3 components vector aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dvec3 aligned_dvec3;
+
+ /// 4 components vector aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dvec4 aligned_dvec4;
+
+ /// 1 component vector tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dvec1 packed_dvec1;
+
+ /// 2 components vector tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dvec2 packed_dvec2;
+
+ /// 3 components vector tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dvec3 packed_dvec3;
+
+ /// 4 components vector tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dvec4 packed_dvec4;
+
+ /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat2 aligned_dmat2;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat3 aligned_dmat3;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat4 aligned_dmat4;
+
+ /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat2 packed_dmat2;
+
+ /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat3 packed_dmat3;
+
+ /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat4 packed_dmat4;
+
+ /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat2x2 aligned_dmat2x2;
+
+ /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat2x3 aligned_dmat2x3;
+
+ /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat2x4 aligned_dmat2x4;
+
+ /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat3x2 aligned_dmat3x2;
+
+ /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat3x3 aligned_dmat3x3;
+
+ /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat3x4 aligned_dmat3x4;
+
+ /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat4x2 aligned_dmat4x2;
+
+ /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat4x3 aligned_dmat4x3;
+
+ /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers.
+ typedef aligned_highp_dmat4x4 aligned_dmat4x4;
+
+ /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat2x2 packed_dmat2x2;
+
+ /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers.
+ typedef packed_highp_dmat2x3 packed_dmat2x3; + + /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat2x4 packed_dmat2x4; + + /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat3x2 packed_dmat3x2; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat3x3 packed_dmat3x3; + + /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat3x4 packed_dmat3x4; + + /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat4x2 packed_dmat4x2; + + /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat4x3 packed_dmat4x3; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat4x4 packed_dmat4x4; +#endif//GLM_PRECISION + +#if(defined(GLM_PRECISION_LOWP_INT)) + typedef aligned_lowp_ivec1 aligned_ivec1; + typedef aligned_lowp_ivec2 aligned_ivec2; + typedef aligned_lowp_ivec3 aligned_ivec3; + typedef aligned_lowp_ivec4 aligned_ivec4; +#elif(defined(GLM_PRECISION_MEDIUMP_INT)) + typedef aligned_mediump_ivec1 aligned_ivec1; + typedef aligned_mediump_ivec2 aligned_ivec2; + typedef aligned_mediump_ivec3 aligned_ivec3; + typedef aligned_mediump_ivec4 aligned_ivec4; +#else //defined(GLM_PRECISION_HIGHP_INT) + /// 1 component vector aligned in memory of signed integer numbers. + typedef aligned_highp_ivec1 aligned_ivec1; + + /// 2 components vector aligned in memory of signed integer numbers. + typedef aligned_highp_ivec2 aligned_ivec2; + + /// 3 components vector aligned in memory of signed integer numbers. + typedef aligned_highp_ivec3 aligned_ivec3; + + /// 4 components vector aligned in memory of signed integer numbers. + typedef aligned_highp_ivec4 aligned_ivec4; + + /// 1 component vector tightly packed in memory of signed integer numbers. + typedef packed_highp_ivec1 packed_ivec1; + + /// 2 components vector tightly packed in memory of signed integer numbers. + typedef packed_highp_ivec2 packed_ivec2; + + /// 3 components vector tightly packed in memory of signed integer numbers. + typedef packed_highp_ivec3 packed_ivec3; + + /// 4 components vector tightly packed in memory of signed integer numbers. + typedef packed_highp_ivec4 packed_ivec4; +#endif//GLM_PRECISION + + // -- Unsigned integer definition -- + +#if(defined(GLM_PRECISION_LOWP_UINT)) + typedef aligned_lowp_uvec1 aligned_uvec1; + typedef aligned_lowp_uvec2 aligned_uvec2; + typedef aligned_lowp_uvec3 aligned_uvec3; + typedef aligned_lowp_uvec4 aligned_uvec4; +#elif(defined(GLM_PRECISION_MEDIUMP_UINT)) + typedef aligned_mediump_uvec1 aligned_uvec1; + typedef aligned_mediump_uvec2 aligned_uvec2; + typedef aligned_mediump_uvec3 aligned_uvec3; + typedef aligned_mediump_uvec4 aligned_uvec4; +#else //defined(GLM_PRECISION_HIGHP_UINT) + /// 1 component vector aligned in memory of unsigned integer numbers. + typedef aligned_highp_uvec1 aligned_uvec1; + + /// 2 components vector aligned in memory of unsigned integer numbers. + typedef aligned_highp_uvec2 aligned_uvec2; + + /// 3 components vector aligned in memory of unsigned integer numbers. + typedef aligned_highp_uvec3 aligned_uvec3; + + /// 4 components vector aligned in memory of unsigned integer numbers. 
+ typedef aligned_highp_uvec4 aligned_uvec4;
+
+ /// 1 component vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec1 packed_uvec1;
+
+ /// 2 components vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec2 packed_uvec2;
+
+ /// 3 components vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec3 packed_uvec3;
+
+ /// 4 components vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec4 packed_uvec4;
+#endif//GLM_PRECISION
+
+#if(defined(GLM_PRECISION_LOWP_BOOL))
+ typedef aligned_lowp_bvec1 aligned_bvec1;
+ typedef aligned_lowp_bvec2 aligned_bvec2;
+ typedef aligned_lowp_bvec3 aligned_bvec3;
+ typedef aligned_lowp_bvec4 aligned_bvec4;
+#elif(defined(GLM_PRECISION_MEDIUMP_BOOL))
+ typedef aligned_mediump_bvec1 aligned_bvec1;
+ typedef aligned_mediump_bvec2 aligned_bvec2;
+ typedef aligned_mediump_bvec3 aligned_bvec3;
+ typedef aligned_mediump_bvec4 aligned_bvec4;
+#else //defined(GLM_PRECISION_HIGHP_BOOL)
+ /// 1 component vector aligned in memory of bool values.
+ typedef aligned_highp_bvec1 aligned_bvec1;
+
+ /// 2 components vector aligned in memory of bool values.
+ typedef aligned_highp_bvec2 aligned_bvec2;
+
+ /// 3 components vector aligned in memory of bool values.
+ typedef aligned_highp_bvec3 aligned_bvec3;
+
+ /// 4 components vector aligned in memory of bool values.
+ typedef aligned_highp_bvec4 aligned_bvec4;
+
+ /// 1 component vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec1 packed_bvec1;
+
+ /// 2 components vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec2 packed_bvec2;
+
+ /// 3 components vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec3 packed_bvec3;
+
+ /// 4 components vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec4 packed_bvec4;
+#endif//GLM_PRECISION
+
+ /// @}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/type_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/type_precision.hpp
new file mode 100644
index 000000000000..775e2f484d73
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/type_precision.hpp
@@ -0,0 +1,2094 @@
+/// @ref gtc_type_precision
+/// @file glm/gtc/type_precision.hpp
+///
+/// @see core (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtc_type_precision GLM_GTC_type_precision
+/// @ingroup gtc
+///
+/// Include to use the features of this extension.
+///
+/// Defines specific C++-based qualifier types.
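+///
+/// A minimal usage sketch of the exact-width scalar and qualified vector
+/// typedefs defined in this header (assuming the default
+/// GLM_PRECISION_HIGHP_* configuration, so the unqualified aliases resolve
+/// to the high qualifier types):
+/// @code
+/// #include <glm/gtc/type_precision.hpp>
+///
+/// glm::i8  a = -1;            // exactly 8 bits, signed
+/// glm::u32 b = 0xFFFFFFFFu;   // exactly 32 bits, unsigned
+/// glm::f64 c = 0.5;           // 64 bit (double-precision) float
+/// glm::highp_f32vec3 v(1.0f); // 3 component vector of 32 bit floats
+/// @endcode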
+ +#pragma once + +// Dependency: +#include "../gtc/quaternion.hpp" +#include "../gtc/vec1.hpp" +#include "../ext/vector_int1_sized.hpp" +#include "../ext/vector_int2_sized.hpp" +#include "../ext/vector_int3_sized.hpp" +#include "../ext/vector_int4_sized.hpp" +#include "../ext/scalar_int_sized.hpp" +#include "../ext/vector_uint1_sized.hpp" +#include "../ext/vector_uint2_sized.hpp" +#include "../ext/vector_uint3_sized.hpp" +#include "../ext/vector_uint4_sized.hpp" +#include "../ext/scalar_uint_sized.hpp" +#include "../detail/type_vec2.hpp" +#include "../detail/type_vec3.hpp" +#include "../detail/type_vec4.hpp" +#include "../detail/type_mat2x2.hpp" +#include "../detail/type_mat2x3.hpp" +#include "../detail/type_mat2x4.hpp" +#include "../detail/type_mat3x2.hpp" +#include "../detail/type_mat3x3.hpp" +#include "../detail/type_mat3x4.hpp" +#include "../detail/type_mat4x2.hpp" +#include "../detail/type_mat4x3.hpp" +#include "../detail/type_mat4x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_type_precision extension included") +#endif + +namespace glm +{ + /////////////////////////// + // Signed int vector types + + /// @addtogroup gtc_type_precision + /// @{ + + /// Low qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 lowp_int8; + + /// Low qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 lowp_int16; + + /// Low qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 lowp_int32; + + /// Low qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 lowp_int64; + + /// Low qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 lowp_int8_t; + + /// Low qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 lowp_int16_t; + + /// Low qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 lowp_int32_t; + + /// Low qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 lowp_int64_t; + + /// Low qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 lowp_i8; + + /// Low qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 lowp_i16; + + /// Low qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 lowp_i32; + + /// Low qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 lowp_i64; + + /// Medium qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 mediump_int8; + + /// Medium qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 mediump_int16; + + /// Medium qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 mediump_int32; + + /// Medium qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 mediump_int64; + + /// Medium qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 mediump_int8_t; + + /// Medium qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 mediump_int16_t; + + /// Medium qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 mediump_int32_t; + + /// Medium qualifier 64 bit signed integer type. 
+ /// @see gtc_type_precision
+ typedef detail::int64 mediump_int64_t;
+
+ /// Medium qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 mediump_i8;
+
+ /// Medium qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 mediump_i16;
+
+ /// Medium qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 mediump_i32;
+
+ /// Medium qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 mediump_i64;
+
+ /// High qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 highp_int8;
+
+ /// High qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 highp_int16;
+
+ /// High qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 highp_int32;
+
+ /// High qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 highp_int64;
+
+ /// High qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 highp_int8_t;
+
+ /// High qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 highp_int16_t;
+
+ /// High qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 highp_int32_t;
+
+ /// High qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 highp_int64_t;
+
+ /// High qualifier 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 highp_i8;
+
+ /// High qualifier 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 highp_i16;
+
+ /// High qualifier 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 highp_i32;
+
+ /// High qualifier 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 highp_i64;
+
+
+#if GLM_HAS_EXTENDED_INTEGER_TYPE
+ using std::int8_t;
+ using std::int16_t;
+ using std::int32_t;
+ using std::int64_t;
+#else
+ /// 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 int8_t;
+
+ /// 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 int16_t;
+
+ /// 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 int32_t;
+
+ /// 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 int64_t;
+#endif
+
+ /// 8 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int8 i8;
+
+ /// 16 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int16 i16;
+
+ /// 32 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int32 i32;
+
+ /// 64 bit signed integer type.
+ /// @see gtc_type_precision
+ typedef detail::int64 i64;
+
+ /////////////////////////////
+ // Unsigned int vector types
+
+ /// Low qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 lowp_uint8;
+
+ /// Low qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 lowp_uint16;
+
+ /// Low qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 lowp_uint32;
+
+ /// Low qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 lowp_uint64;
+
+ /// Low qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision + typedef detail::uint8 lowp_uint8_t; + + /// Low qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 lowp_uint16_t; + + /// Low qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 lowp_uint32_t; + + /// Low qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 lowp_uint64_t; + + /// Low qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 lowp_u8; + + /// Low qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 lowp_u16; + + /// Low qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 lowp_u32; + + /// Low qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 lowp_u64; + + /// Medium qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 mediump_uint8; + + /// Medium qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 mediump_uint16; + + /// Medium qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 mediump_uint32; + + /// Medium qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 mediump_uint64; + + /// Medium qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 mediump_uint8_t; + + /// Medium qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 mediump_uint16_t; + + /// Medium qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 mediump_uint32_t; + + /// Medium qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 mediump_uint64_t; + + /// Medium qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 mediump_u8; + + /// Medium qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 mediump_u16; + + /// Medium qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 mediump_u32; + + /// Medium qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 mediump_u64; + + /// High qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 highp_uint8; + + /// High qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 highp_uint16; + + /// High qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 highp_uint32; + + /// High qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 highp_uint64; + + /// High qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 highp_uint8_t; + + /// High qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 highp_uint16_t; + + /// High qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 highp_uint32_t; + + /// High qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 highp_uint64_t; + + /// High qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 highp_u8; + + /// High qualifier 16 bit unsigned integer type. 
+ /// @see gtc_type_precision
+ typedef detail::uint16 highp_u16;
+
+ /// High qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 highp_u32;
+
+ /// High qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 highp_u64;
+
+#if GLM_HAS_EXTENDED_INTEGER_TYPE
+ using std::uint8_t;
+ using std::uint16_t;
+ using std::uint32_t;
+ using std::uint64_t;
+#else
+ /// Default qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 uint8_t;
+
+ /// Default qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 uint16_t;
+
+ /// Default qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 uint32_t;
+
+ /// Default qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 uint64_t;
+#endif
+
+ /// Default qualifier 8 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint8 u8;
+
+ /// Default qualifier 16 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint16 u16;
+
+ /// Default qualifier 32 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint32 u32;
+
+ /// Default qualifier 64 bit unsigned integer type.
+ /// @see gtc_type_precision
+ typedef detail::uint64 u64;
+
+ //////////////////////
+ // Float vector types
+
+ /// Single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float float32;
+
+ /// Double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef double float64;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_float32;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_float64;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_float32_t;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_float64_t;
+
+ /// Low 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 lowp_f32;
+
+ /// Low 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 lowp_f64;
+
+
+ /// Medium 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 mediump_float32;
+
+ /// Medium 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 mediump_float64;
+
+ /// Medium 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 mediump_float32_t;
+
+ /// Medium 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 mediump_float64_t;
+
+ /// Medium 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 mediump_f32;
+
+ /// Medium 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 mediump_f64;
+
+
+ /// High 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 highp_float32;
+
+ /// High 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 highp_float64;
+
+ /// High 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 highp_float32_t;
+
+ /// High 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 highp_float64_t;
+
+ /// High 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float32 highp_f32;
+
+ /// High 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef float64 highp_f64;
+
+
+#if(defined(GLM_PRECISION_LOWP_FLOAT))
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef lowp_float32_t float32_t;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef lowp_float64_t float64_t;
+
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef lowp_f32 f32;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef lowp_f64 f64;
+
+#elif(defined(GLM_PRECISION_MEDIUMP_FLOAT))
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef mediump_float32 float32_t;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef mediump_float64 float64_t;
+
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef mediump_float32 f32;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef mediump_float64 f64;
+
+#else//(defined(GLM_PRECISION_HIGHP_FLOAT))
+
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef highp_float32_t float32_t;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef highp_float64_t float64_t;
+
+ /// Default 32 bit single-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef highp_float32_t f32;
+
+ /// Default 64 bit double-qualifier floating-point scalar.
+ /// @see gtc_type_precision
+ typedef highp_float64_t f64;
+#endif
+
+
+ /// Low single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, float, lowp> lowp_fvec1;
+
+ /// Low single-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, float, lowp> lowp_fvec2;
+
+ /// Low single-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, float, lowp> lowp_fvec3;
+
+ /// Low single-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, float, lowp> lowp_fvec4;
+
+
+ /// Medium single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, float, mediump> mediump_fvec1;
+
+ /// Medium single-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, float, mediump> mediump_fvec2;
+
+ /// Medium single-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, float, mediump> mediump_fvec3;
+
+ /// Medium single-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, float, mediump> mediump_fvec4;
+
+
+ /// High single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, float, highp> highp_fvec1;
+
+ /// High single-qualifier floating-point vector of 2 components.
+ /// @see core_precision
+ typedef vec<2, float, highp> highp_fvec2;
+
+ /// High single-qualifier floating-point vector of 3 components.
+ /// @see core_precision
+ typedef vec<3, float, highp> highp_fvec3;
+
+ /// High single-qualifier floating-point vector of 4 components.
+ /// @see core_precision
+ typedef vec<4, float, highp> highp_fvec4;
+
+
+ /// Low single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f32, lowp> lowp_f32vec1;
+
+ /// Low single-qualifier floating-point vector of 2 components.
+ /// @see core_precision
+ typedef vec<2, f32, lowp> lowp_f32vec2;
+
+ /// Low single-qualifier floating-point vector of 3 components.
+ /// @see core_precision
+ typedef vec<3, f32, lowp> lowp_f32vec3;
+
+ /// Low single-qualifier floating-point vector of 4 components.
+ /// @see core_precision
+ typedef vec<4, f32, lowp> lowp_f32vec4;
+
+ /// Medium single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f32, mediump> mediump_f32vec1;
+
+ /// Medium single-qualifier floating-point vector of 2 components.
+ /// @see core_precision
+ typedef vec<2, f32, mediump> mediump_f32vec2;
+
+ /// Medium single-qualifier floating-point vector of 3 components.
+ /// @see core_precision
+ typedef vec<3, f32, mediump> mediump_f32vec3;
+
+ /// Medium single-qualifier floating-point vector of 4 components.
+ /// @see core_precision
+ typedef vec<4, f32, mediump> mediump_f32vec4;
+
+ /// High single-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision
+ typedef vec<1, f32, highp> highp_f32vec1;
+
+ /// High single-qualifier floating-point vector of 2 components.
+ /// @see gtc_type_precision
+ typedef vec<2, f32, highp> highp_f32vec2;
+
+ /// High single-qualifier floating-point vector of 3 components.
+ /// @see gtc_type_precision
+ typedef vec<3, f32, highp> highp_f32vec3;
+
+ /// High single-qualifier floating-point vector of 4 components.
+ /// @see gtc_type_precision
+ typedef vec<4, f32, highp> highp_f32vec4;
+
+
+ /// Low double-qualifier floating-point vector of 1 component.
+ /// @see gtc_type_precision + typedef vec<1, f64, lowp> lowp_f64vec1; + + /// Low double-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f64, lowp> lowp_f64vec2; + + /// Low double-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f64, lowp> lowp_f64vec3; + + /// Low double-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, f64, lowp> lowp_f64vec4; + + /// Medium double-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f64, mediump> mediump_f64vec1; + + /// Medium double-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f64, mediump> mediump_f64vec2; + + /// Medium double-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f64, mediump> mediump_f64vec3; + + /// Medium double-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, f64, mediump> mediump_f64vec4; + + /// High double-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f64, highp> highp_f64vec1; + + /// High double-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f64, highp> highp_f64vec2; + + /// High double-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f64, highp> highp_f64vec3; + + /// High double-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, f64, highp> highp_f64vec4; + + + + ////////////////////// + // Float matrix types + + /// Low single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef lowp_f32 lowp_fmat1x1; + + /// Low single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, lowp> lowp_fmat2x2; + + /// Low single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, lowp> lowp_fmat2x3; + + /// Low single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, lowp> lowp_fmat2x4; + + /// Low single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, lowp> lowp_fmat3x2; + + /// Low single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, lowp> lowp_fmat3x3; + + /// Low single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, lowp> lowp_fmat3x4; + + /// Low single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, lowp> lowp_fmat4x2; + + /// Low single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, lowp> lowp_fmat4x3; + + /// Low single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, lowp> lowp_fmat4x4; + + /// Low single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef lowp_fmat1x1 lowp_fmat1; + + /// Low single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef lowp_fmat2x2 lowp_fmat2; + + /// Low single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef lowp_fmat3x3 lowp_fmat3; + + /// Low single-qualifier floating-point 4x4 matrix. 
+ /// @see gtc_type_precision + typedef lowp_fmat4x4 lowp_fmat4; + + + /// Medium single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef mediump_f32 mediump_fmat1x1; + + /// Medium single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, mediump> mediump_fmat2x2; + + /// Medium single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, mediump> mediump_fmat2x3; + + /// Medium single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, mediump> mediump_fmat2x4; + + /// Medium single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, mediump> mediump_fmat3x2; + + /// Medium single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, mediump> mediump_fmat3x3; + + /// Medium single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, mediump> mediump_fmat3x4; + + /// Medium single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, mediump> mediump_fmat4x2; + + /// Medium single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, mediump> mediump_fmat4x3; + + /// Medium single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, mediump> mediump_fmat4x4; + + /// Medium single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef mediump_fmat1x1 mediump_fmat1; + + /// Medium single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mediump_fmat2x2 mediump_fmat2; + + /// Medium single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mediump_fmat3x3 mediump_fmat3; + + /// Medium single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mediump_fmat4x4 mediump_fmat4; + + + /// High single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef highp_f32 highp_fmat1x1; + + /// High single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, highp> highp_fmat2x2; + + /// High single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, highp> highp_fmat2x3; + + /// High single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, highp> highp_fmat2x4; + + /// High single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, highp> highp_fmat3x2; + + /// High single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, highp> highp_fmat3x3; + + /// High single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, highp> highp_fmat3x4; + + /// High single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, highp> highp_fmat4x2; + + /// High single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, highp> highp_fmat4x3; + + /// High single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, highp> highp_fmat4x4; + + /// High single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef highp_fmat1x1 highp_fmat1; + + /// High single-qualifier floating-point 2x2 matrix. 
+ /// @see gtc_type_precision
+ typedef highp_fmat2x2 highp_fmat2;
+
+ /// High single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef highp_fmat3x3 highp_fmat3;
+
+ /// High single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef highp_fmat4x4 highp_fmat4;
+
+
+ /// Low single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f32 lowp_f32mat1x1;
+
+ /// Low single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, lowp> lowp_f32mat2x2;
+
+ /// Low single-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f32, lowp> lowp_f32mat2x3;
+
+ /// Low single-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f32, lowp> lowp_f32mat2x4;
+
+ /// Low single-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f32, lowp> lowp_f32mat3x2;
+
+ /// Low single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, lowp> lowp_f32mat3x3;
+
+ /// Low single-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f32, lowp> lowp_f32mat3x4;
+
+ /// Low single-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f32, lowp> lowp_f32mat4x2;
+
+ /// Low single-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f32, lowp> lowp_f32mat4x3;
+
+ /// Low single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 4, f32, lowp> lowp_f32mat4x4;
+
+ /// Low single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef detail::tmat1x1 lowp_f32mat1;
+
+ /// Low single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_f32mat2x2 lowp_f32mat2;
+
+ /// Low single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_f32mat3x3 lowp_f32mat3;
+
+ /// Low single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision
+ typedef lowp_f32mat4x4 lowp_f32mat4;
+
+
+ /// Medium single-qualifier floating-point 1x1 matrix.
+ /// @see gtc_type_precision
+ //typedef f32 mediump_f32mat1x1;
+
+ /// Medium single-qualifier floating-point 2x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 2, f32, mediump> mediump_f32mat2x2;
+
+ /// Medium single-qualifier floating-point 2x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 3, f32, mediump> mediump_f32mat2x3;
+
+ /// Medium single-qualifier floating-point 2x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<2, 4, f32, mediump> mediump_f32mat2x4;
+
+ /// Medium single-qualifier floating-point 3x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 2, f32, mediump> mediump_f32mat3x2;
+
+ /// Medium single-qualifier floating-point 3x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 3, f32, mediump> mediump_f32mat3x3;
+
+ /// Medium single-qualifier floating-point 3x4 matrix.
+ /// @see gtc_type_precision
+ typedef mat<3, 4, f32, mediump> mediump_f32mat3x4;
+
+ /// Medium single-qualifier floating-point 4x2 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 2, f32, mediump> mediump_f32mat4x2;
+
+ /// Medium single-qualifier floating-point 4x3 matrix.
+ /// @see gtc_type_precision
+ typedef mat<4, 3, f32, mediump> mediump_f32mat4x3;
+
+ /// Medium single-qualifier floating-point 4x4 matrix.
+ /// @see gtc_type_precision + typedef mat<4, 4, f32, mediump> mediump_f32mat4x4; + + /// Medium single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef detail::tmat1x1 f32mat1; + + /// Medium single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mediump_f32mat2x2 mediump_f32mat2; + + /// Medium single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mediump_f32mat3x3 mediump_f32mat3; + + /// Medium single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mediump_f32mat4x4 mediump_f32mat4; + + + /// High single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f32 highp_f32mat1x1; + + /// High single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, highp> highp_f32mat2x2; + + /// High single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, highp> highp_f32mat2x3; + + /// High single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, highp> highp_f32mat2x4; + + /// High single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, highp> highp_f32mat3x2; + + /// High single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, highp> highp_f32mat3x3; + + /// High single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, highp> highp_f32mat3x4; + + /// High single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, highp> highp_f32mat4x2; + + /// High single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, highp> highp_f32mat4x3; + + /// High single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, highp> highp_f32mat4x4; + + /// High single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef detail::tmat1x1 f32mat1; + + /// High single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef highp_f32mat2x2 highp_f32mat2; + + /// High single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef highp_f32mat3x3 highp_f32mat3; + + /// High single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef highp_f32mat4x4 highp_f32mat4; + + + /// Low double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f64 lowp_f64mat1x1; + + /// Low double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f64, lowp> lowp_f64mat2x2; + + /// Low double-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f64, lowp> lowp_f64mat2x3; + + /// Low double-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f64, lowp> lowp_f64mat2x4; + + /// Low double-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f64, lowp> lowp_f64mat3x2; + + /// Low double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f64, lowp> lowp_f64mat3x3; + + /// Low double-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f64, lowp> lowp_f64mat3x4; + + /// Low double-qualifier floating-point 4x2 matrix. 
+ /// @see gtc_type_precision + typedef mat<4, 2, f64, lowp> lowp_f64mat4x2; + + /// Low double-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f64, lowp> lowp_f64mat4x3; + + /// Low double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f64, lowp> lowp_f64mat4x4; + + /// Low double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef lowp_f64mat1x1 lowp_f64mat1; + + /// Low double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef lowp_f64mat2x2 lowp_f64mat2; + + /// Low double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef lowp_f64mat3x3 lowp_f64mat3; + + /// Low double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef lowp_f64mat4x4 lowp_f64mat4; + + + /// Medium double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f64 Highp_f64mat1x1; + + /// Medium double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f64, mediump> mediump_f64mat2x2; + + /// Medium double-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f64, mediump> mediump_f64mat2x3; + + /// Medium double-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f64, mediump> mediump_f64mat2x4; + + /// Medium double-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f64, mediump> mediump_f64mat3x2; + + /// Medium double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f64, mediump> mediump_f64mat3x3; + + /// Medium double-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f64, mediump> mediump_f64mat3x4; + + /// Medium double-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f64, mediump> mediump_f64mat4x2; + + /// Medium double-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f64, mediump> mediump_f64mat4x3; + + /// Medium double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f64, mediump> mediump_f64mat4x4; + + /// Medium double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef mediump_f64mat1x1 mediump_f64mat1; + + /// Medium double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mediump_f64mat2x2 mediump_f64mat2; + + /// Medium double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mediump_f64mat3x3 mediump_f64mat3; + + /// Medium double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mediump_f64mat4x4 mediump_f64mat4; + + /// High double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f64 highp_f64mat1x1; + + /// High double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f64, highp> highp_f64mat2x2; + + /// High double-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f64, highp> highp_f64mat2x3; + + /// High double-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f64, highp> highp_f64mat2x4; + + /// High double-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f64, highp> highp_f64mat3x2; + + /// High double-qualifier floating-point 3x3 matrix. 
+ /// @see gtc_type_precision + typedef mat<3, 3, f64, highp> highp_f64mat3x3; + + /// High double-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f64, highp> highp_f64mat3x4; + + /// High double-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f64, highp> highp_f64mat4x2; + + /// High double-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f64, highp> highp_f64mat4x3; + + /// High double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f64, highp> highp_f64mat4x4; + + /// High double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef highp_f64mat1x1 highp_f64mat1; + + /// High double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef highp_f64mat2x2 highp_f64mat2; + + /// High double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef highp_f64mat3x3 highp_f64mat3; + + /// High double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef highp_f64mat4x4 highp_f64mat4; + + + ///////////////////////////// + // Signed int vector types + + /// Low qualifier signed integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, int, lowp> lowp_ivec1; + + /// Low qualifier signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, int, lowp> lowp_ivec2; + + /// Low qualifier signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, int, lowp> lowp_ivec3; + + /// Low qualifier signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, int, lowp> lowp_ivec4; + + + /// Medium qualifier signed integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, int, mediump> mediump_ivec1; + + /// Medium qualifier signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, int, mediump> mediump_ivec2; + + /// Medium qualifier signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, int, mediump> mediump_ivec3; + + /// Medium qualifier signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, int, mediump> mediump_ivec4; + + + /// High qualifier signed integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, int, highp> highp_ivec1; + + /// High qualifier signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, int, highp> highp_ivec2; + + /// High qualifier signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, int, highp> highp_ivec3; + + /// High qualifier signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, int, highp> highp_ivec4; + + + /// Low qualifier 8 bit signed integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, i8, lowp> lowp_i8vec1; + + /// Low qualifier 8 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i8, lowp> lowp_i8vec2; + + /// Low qualifier 8 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i8, lowp> lowp_i8vec3; + + /// Low qualifier 8 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i8, lowp> lowp_i8vec4; + + + /// Medium qualifier 8 bit signed integer scalar type. 
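+	///
+	/// Editorial usage sketch (hypothetical caller, not upstream GLM code); on
+	/// desktop targets the lowp/mediump/highp qualifiers typically alias the same
+	/// storage, so these aliases mainly document intent:
+	/// @code
+	/// glm::lowp_i8vec4 rgba(127, 0, 0, 127); // four 8-bit signed components
+	/// glm::highp_ivec2 size(1920, 1080);     // plain int components
+	/// @endcode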
+ /// @see gtc_type_precision + typedef vec<1, i8, mediump> mediump_i8vec1; + + /// Medium qualifier 8 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i8, mediump> mediump_i8vec2; + + /// Medium qualifier 8 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i8, mediump> mediump_i8vec3; + + /// Medium qualifier 8 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i8, mediump> mediump_i8vec4; + + + /// High qualifier 8 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i8, highp> highp_i8vec1; + + /// High qualifier 8 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i8, highp> highp_i8vec2; + + /// High qualifier 8 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i8, highp> highp_i8vec3; + + /// High qualifier 8 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i8, highp> highp_i8vec4; + + + /// Low qualifier 16 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i16, lowp> lowp_i16vec1; + + /// Low qualifier 16 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i16, lowp> lowp_i16vec2; + + /// Low qualifier 16 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i16, lowp> lowp_i16vec3; + + /// Low qualifier 16 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i16, lowp> lowp_i16vec4; + + + /// Medium qualifier 16 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i16, mediump> mediump_i16vec1; + + /// Medium qualifier 16 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i16, mediump> mediump_i16vec2; + + /// Medium qualifier 16 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i16, mediump> mediump_i16vec3; + + /// Medium qualifier 16 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i16, mediump> mediump_i16vec4; + + + /// High qualifier 16 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i16, highp> highp_i16vec1; + + /// High qualifier 16 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i16, highp> highp_i16vec2; + + /// High qualifier 16 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i16, highp> highp_i16vec3; + + /// High qualifier 16 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i16, highp> highp_i16vec4; + + + /// Low qualifier 32 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i32, lowp> lowp_i32vec1; + + /// Low qualifier 32 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i32, lowp> lowp_i32vec2; + + /// Low qualifier 32 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i32, lowp> lowp_i32vec3; + + /// Low qualifier 32 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i32, lowp> lowp_i32vec4; + + + /// Medium qualifier 32 bit signed integer scalar type. 
+ /// @see gtc_type_precision + typedef vec<1, i32, mediump> mediump_i32vec1; + + /// Medium qualifier 32 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i32, mediump> mediump_i32vec2; + + /// Medium qualifier 32 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i32, mediump> mediump_i32vec3; + + /// Medium qualifier 32 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i32, mediump> mediump_i32vec4; + + + /// High qualifier 32 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i32, highp> highp_i32vec1; + + /// High qualifier 32 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i32, highp> highp_i32vec2; + + /// High qualifier 32 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i32, highp> highp_i32vec3; + + /// High qualifier 32 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i32, highp> highp_i32vec4; + + + /// Low qualifier 64 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i64, lowp> lowp_i64vec1; + + /// Low qualifier 64 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i64, lowp> lowp_i64vec2; + + /// Low qualifier 64 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i64, lowp> lowp_i64vec3; + + /// Low qualifier 64 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i64, lowp> lowp_i64vec4; + + + /// Medium qualifier 64 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i64, mediump> mediump_i64vec1; + + /// Medium qualifier 64 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i64, mediump> mediump_i64vec2; + + /// Medium qualifier 64 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i64, mediump> mediump_i64vec3; + + /// Medium qualifier 64 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i64, mediump> mediump_i64vec4; + + + /// High qualifier 64 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i64, highp> highp_i64vec1; + + /// High qualifier 64 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i64, highp> highp_i64vec2; + + /// High qualifier 64 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i64, highp> highp_i64vec3; + + /// High qualifier 64 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i64, highp> highp_i64vec4; + + + ///////////////////////////// + // Unsigned int vector types + + /// Low qualifier unsigned integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, uint, lowp> lowp_uvec1; + + /// Low qualifier unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, uint, lowp> lowp_uvec2; + + /// Low qualifier unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, uint, lowp> lowp_uvec3; + + /// Low qualifier unsigned integer vector of 4 components type. 
+ /// @see gtc_type_precision + typedef vec<4, uint, lowp> lowp_uvec4; + + + /// Medium qualifier unsigned integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, uint, mediump> mediump_uvec1; + + /// Medium qualifier unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, uint, mediump> mediump_uvec2; + + /// Medium qualifier unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, uint, mediump> mediump_uvec3; + + /// Medium qualifier unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, uint, mediump> mediump_uvec4; + + + /// High qualifier unsigned integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, uint, highp> highp_uvec1; + + /// High qualifier unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, uint, highp> highp_uvec2; + + /// High qualifier unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, uint, highp> highp_uvec3; + + /// High qualifier unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, uint, highp> highp_uvec4; + + + /// Low qualifier 8 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u8, lowp> lowp_u8vec1; + + /// Low qualifier 8 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u8, lowp> lowp_u8vec2; + + /// Low qualifier 8 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u8, lowp> lowp_u8vec3; + + /// Low qualifier 8 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u8, lowp> lowp_u8vec4; + + + /// Medium qualifier 8 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u8, mediump> mediump_u8vec1; + + /// Medium qualifier 8 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u8, mediump> mediump_u8vec2; + + /// Medium qualifier 8 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u8, mediump> mediump_u8vec3; + + /// Medium qualifier 8 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u8, mediump> mediump_u8vec4; + + + /// High qualifier 8 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u8, highp> highp_u8vec1; + + /// High qualifier 8 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u8, highp> highp_u8vec2; + + /// High qualifier 8 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u8, highp> highp_u8vec3; + + /// High qualifier 8 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u8, highp> highp_u8vec4; + + + /// Low qualifier 16 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u16, lowp> lowp_u16vec1; + + /// Low qualifier 16 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u16, lowp> lowp_u16vec2; + + /// Low qualifier 16 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u16, lowp> lowp_u16vec3; + + /// Low qualifier 16 bit unsigned integer vector of 4 components type. 
+ /// @see gtc_type_precision + typedef vec<4, u16, lowp> lowp_u16vec4; + + + /// Medium qualifier 16 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u16, mediump> mediump_u16vec1; + + /// Medium qualifier 16 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u16, mediump> mediump_u16vec2; + + /// Medium qualifier 16 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u16, mediump> mediump_u16vec3; + + /// Medium qualifier 16 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u16, mediump> mediump_u16vec4; + + + /// High qualifier 16 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u16, highp> highp_u16vec1; + + /// High qualifier 16 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u16, highp> highp_u16vec2; + + /// High qualifier 16 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u16, highp> highp_u16vec3; + + /// High qualifier 16 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u16, highp> highp_u16vec4; + + + /// Low qualifier 32 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u32, lowp> lowp_u32vec1; + + /// Low qualifier 32 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u32, lowp> lowp_u32vec2; + + /// Low qualifier 32 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u32, lowp> lowp_u32vec3; + + /// Low qualifier 32 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u32, lowp> lowp_u32vec4; + + + /// Medium qualifier 32 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u32, mediump> mediump_u32vec1; + + /// Medium qualifier 32 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u32, mediump> mediump_u32vec2; + + /// Medium qualifier 32 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u32, mediump> mediump_u32vec3; + + /// Medium qualifier 32 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u32, mediump> mediump_u32vec4; + + + /// High qualifier 32 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u32, highp> highp_u32vec1; + + /// High qualifier 32 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u32, highp> highp_u32vec2; + + /// High qualifier 32 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u32, highp> highp_u32vec3; + + /// High qualifier 32 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u32, highp> highp_u32vec4; + + + /// Low qualifier 64 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u64, lowp> lowp_u64vec1; + + /// Low qualifier 64 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u64, lowp> lowp_u64vec2; + + /// Low qualifier 64 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u64, lowp> lowp_u64vec3; + + /// Low qualifier 64 bit unsigned integer vector of 4 components type. 
+ /// @see gtc_type_precision + typedef vec<4, u64, lowp> lowp_u64vec4; + + + /// Medium qualifier 64 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u64, mediump> mediump_u64vec1; + + /// Medium qualifier 64 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u64, mediump> mediump_u64vec2; + + /// Medium qualifier 64 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u64, mediump> mediump_u64vec3; + + /// Medium qualifier 64 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u64, mediump> mediump_u64vec4; + + + /// High qualifier 64 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u64, highp> highp_u64vec1; + + /// High qualifier 64 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u64, highp> highp_u64vec2; + + /// High qualifier 64 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u64, highp> highp_u64vec3; + + /// High qualifier 64 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u64, highp> highp_u64vec4; + + + ////////////////////// + // Float vector types + + /// 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 float32_t; + + /// 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 f32; + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 float64_t; + + /// 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 f64; +# endif//GLM_FORCE_SINGLE_ONLY + + /// Single-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, float, defaultp> fvec1; + + /// Single-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, float, defaultp> fvec2; + + /// Single-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, float, defaultp> fvec3; + + /// Single-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, float, defaultp> fvec4; + + + /// Single-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f32, defaultp> f32vec1; + + /// Single-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f32, defaultp> f32vec2; + + /// Single-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f32, defaultp> f32vec3; + + /// Single-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, f32, defaultp> f32vec4; + +# ifndef GLM_FORCE_SINGLE_ONLY + /// Double-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f64, defaultp> f64vec1; + + /// Double-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f64, defaultp> f64vec2; + + /// Double-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f64, defaultp> f64vec3; + + /// Double-qualifier floating-point vector of 4 components. 
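+	///
+	/// Editorial usage sketch (not upstream code) — the sized aliases pin the
+	/// component width, which plain float/double vectors leave to the platform:
+	/// @code
+	/// static_assert(sizeof(glm::f32vec4) == 4 * sizeof(glm::f32), "tightly packed");
+	/// glm::f64vec3 position(0.0, 0.0, 0.0);
+	/// @endcode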
+ /// @see gtc_type_precision + typedef vec<4, f64, defaultp> f64vec4; +# endif//GLM_FORCE_SINGLE_ONLY + + + ////////////////////// + // Float matrix types + + /// Single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef detail::tmat1x1 fmat1; + + /// Single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, defaultp> fmat2; + + /// Single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, defaultp> fmat3; + + /// Single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, defaultp> fmat4; + + + /// Single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f32 fmat1x1; + + /// Single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, defaultp> fmat2x2; + + /// Single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, defaultp> fmat2x3; + + /// Single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, defaultp> fmat2x4; + + /// Single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, defaultp> fmat3x2; + + /// Single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, defaultp> fmat3x3; + + /// Single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, defaultp> fmat3x4; + + /// Single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, defaultp> fmat4x2; + + /// Single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, defaultp> fmat4x3; + + /// Single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, defaultp> fmat4x4; + + + /// Single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef detail::tmat1x1 f32mat1; + + /// Single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, defaultp> f32mat2; + + /// Single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, defaultp> f32mat3; + + /// Single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, defaultp> f32mat4; + + + /// Single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f32 f32mat1x1; + + /// Single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, defaultp> f32mat2x2; + + /// Single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, defaultp> f32mat2x3; + + /// Single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, defaultp> f32mat2x4; + + /// Single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, defaultp> f32mat3x2; + + /// Single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, defaultp> f32mat3x3; + + /// Single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, defaultp> f32mat3x4; + + /// Single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, defaultp> f32mat4x2; + + /// Single-qualifier floating-point 4x3 matrix. 
+	/// @see gtc_type_precision
+	typedef mat<4, 3, f32, defaultp> f32mat4x3;
+
+	/// Single-qualifier floating-point 4x4 matrix.
+	/// @see gtc_type_precision
+	typedef mat<4, 4, f32, defaultp> f32mat4x4;
+
+
+#	ifndef GLM_FORCE_SINGLE_ONLY
+
+	/// Double-qualifier floating-point 1x1 matrix.
+	/// @see gtc_type_precision
+	//typedef detail::tmat1x1<f64, defaultp> f64mat1;
+
+	/// Double-qualifier floating-point 2x2 matrix.
+	/// @see gtc_type_precision
+	typedef mat<2, 2, f64, defaultp> f64mat2;
+
+	/// Double-qualifier floating-point 3x3 matrix.
+	/// @see gtc_type_precision
+	typedef mat<3, 3, f64, defaultp> f64mat3;
+
+	/// Double-qualifier floating-point 4x4 matrix.
+	/// @see gtc_type_precision
+	typedef mat<4, 4, f64, defaultp> f64mat4;
+
+
+	/// Double-qualifier floating-point 1x1 matrix.
+	/// @see gtc_type_precision
+	//typedef f64 f64mat1x1;
+
+	/// Double-qualifier floating-point 2x2 matrix.
+	/// @see gtc_type_precision
+	typedef mat<2, 2, f64, defaultp> f64mat2x2;
+
+	/// Double-qualifier floating-point 2x3 matrix.
+	/// @see gtc_type_precision
+	typedef mat<2, 3, f64, defaultp> f64mat2x3;
+
+	/// Double-qualifier floating-point 2x4 matrix.
+	/// @see gtc_type_precision
+	typedef mat<2, 4, f64, defaultp> f64mat2x4;
+
+	/// Double-qualifier floating-point 3x2 matrix.
+	/// @see gtc_type_precision
+	typedef mat<3, 2, f64, defaultp> f64mat3x2;
+
+	/// Double-qualifier floating-point 3x3 matrix.
+	/// @see gtc_type_precision
+	typedef mat<3, 3, f64, defaultp> f64mat3x3;
+
+	/// Double-qualifier floating-point 3x4 matrix.
+	/// @see gtc_type_precision
+	typedef mat<3, 4, f64, defaultp> f64mat3x4;
+
+	/// Double-qualifier floating-point 4x2 matrix.
+	/// @see gtc_type_precision
+	typedef mat<4, 2, f64, defaultp> f64mat4x2;
+
+	/// Double-qualifier floating-point 4x3 matrix.
+	/// @see gtc_type_precision
+	typedef mat<4, 3, f64, defaultp> f64mat4x3;
+
+	/// Double-qualifier floating-point 4x4 matrix.
+	/// @see gtc_type_precision
+	typedef mat<4, 4, f64, defaultp> f64mat4x4;
+
+#	endif//GLM_FORCE_SINGLE_ONLY
+
+	//////////////////////////
+	// Quaternion types
+
+	/// Single-qualifier floating-point quaternion.
+	/// @see gtc_type_precision
+	typedef qua<f32, defaultp> f32quat;
+
+	/// Low single-qualifier floating-point quaternion.
+	/// @see gtc_type_precision
+	typedef qua<f32, lowp> lowp_f32quat;
+
+	/// Low double-qualifier floating-point quaternion.
+	/// @see gtc_type_precision
+	typedef qua<f64, lowp> lowp_f64quat;
+
+	/// Medium single-qualifier floating-point quaternion.
+	/// @see gtc_type_precision
+	typedef qua<f32, mediump> mediump_f32quat;
+
+#	ifndef GLM_FORCE_SINGLE_ONLY
+
+	/// Medium double-qualifier floating-point quaternion.
+	/// @see gtc_type_precision
+	typedef qua<f64, mediump> mediump_f64quat;
+
+	/// High single-qualifier floating-point quaternion.
+	/// @see gtc_type_precision
+	typedef qua<f32, highp> highp_f32quat;
+
+	/// High double-qualifier floating-point quaternion.
+	/// @see gtc_type_precision
+	typedef qua<f64, highp> highp_f64quat;
+
+	/// Double-qualifier floating-point quaternion.
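+	///
+	/// Editorial usage sketch (not upstream code); GLM quaternion constructors
+	/// take components in w, x, y, z order:
+	/// @code
+	/// glm::f32quat identity(1.0f, 0.0f, 0.0f, 0.0f);
+	/// glm::f64quat q = glm::normalize(glm::f64quat(1.0, 0.0, 1.0, 0.0));
+	/// @endcode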
+	/// @see gtc_type_precision
+	typedef qua<f64, defaultp> f64quat;
+
+#	endif//GLM_FORCE_SINGLE_ONLY
+
+	/// @}
+}//namespace glm
+
+#include "type_precision.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/type_precision.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/type_precision.inl
new file mode 100644
index 000000000000..ae8091206bd4
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/type_precision.inl
@@ -0,0 +1,6 @@
+/// @ref gtc_precision
+
+namespace glm
+{
+
+}
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/type_ptr.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/type_ptr.hpp
new file mode 100644
index 000000000000..d7e625aa5917
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/type_ptr.hpp
@@ -0,0 +1,230 @@
+/// @ref gtc_type_ptr
+/// @file glm/gtc/type_ptr.hpp
+///
+/// @see core (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtc_type_ptr GLM_GTC_type_ptr
+/// @ingroup gtc
+///
+/// Include <glm/gtc/type_ptr.hpp> to use the features of this extension.
+///
+/// Handles the interaction between pointers and vector, matrix types.
+///
+/// This extension defines an overloaded function, glm::value_ptr. It returns
+/// a pointer to the memory layout of the object. Matrix types store their values
+/// in column-major order.
+///
+/// This is useful for uploading data to matrices or copying data to buffer objects.
+///
+/// Example:
+/// @code
+/// #include <glm/glm.hpp>
+/// #include <glm/gtc/type_ptr.hpp>
+///
+/// glm::vec3 aVector(3);
+/// glm::mat4 someMatrix(1.0);
+///
+/// glUniform3fv(uniformLoc, 1, glm::value_ptr(aVector));
+/// glUniformMatrix4fv(uniformMatrixLoc, 1, GL_FALSE, glm::value_ptr(someMatrix));
+/// @endcode
+///
+/// <glm/gtc/type_ptr.hpp> need to be included to use the features of this extension.
+
+#pragma once
+
+// Dependency:
+#include "../gtc/quaternion.hpp"
+#include "../gtc/vec1.hpp"
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include "../mat2x2.hpp"
+#include "../mat2x3.hpp"
+#include "../mat2x4.hpp"
+#include "../mat3x2.hpp"
+#include "../mat3x3.hpp"
+#include "../mat3x4.hpp"
+#include "../mat4x2.hpp"
+#include "../mat4x3.hpp"
+#include "../mat4x4.hpp"
+#include <cstring>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_type_ptr extension included")
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtc_type_ptr
+	/// @{
+
+	/// Return the constant address to the data of the input parameter.
+	/// @see gtc_type_ptr
+	template<typename genType>
+	GLM_FUNC_DECL typename genType::value_type const * value_ptr(genType const& v);
+
+	/// Build a vector from a pointer.
+	/// @see gtc_type_ptr
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<1, T, Q> const& v);
+
+	/// Build a vector from a pointer.
+	/// @see gtc_type_ptr
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<2, T, Q> const& v);
+
+	/// Build a vector from a pointer.
+	/// @see gtc_type_ptr
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<3, T, Q> const& v);
+
+	/// Build a vector from a pointer.
+	/// @see gtc_type_ptr
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<4, T, Q> const& v);
+
+	/// Build a vector from a pointer.
+	/// @see gtc_type_ptr
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<1, T, Q> const& v);
+
+	/// Build a vector from a pointer.
+	/// @see gtc_type_ptr
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<2, T, Q> const& v);
+
+	/// Build a vector from a pointer.
+	/// @see gtc_type_ptr
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<3, T, Q> const& v);
+
+	/// Build a vector from a pointer.
+ /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<4, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<1, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<2, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<3, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<4, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<1, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<2, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<3, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<4, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<2, T, defaultp> make_vec2(T const * const ptr); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<3, T, defaultp> make_vec3(T const * const ptr); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<4, T, defaultp> make_vec4(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<2, 2, T, defaultp> make_mat2x2(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<2, 3, T, defaultp> make_mat2x3(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<2, 4, T, defaultp> make_mat2x4(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<3, 2, T, defaultp> make_mat3x2(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<3, 3, T, defaultp> make_mat3x3(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<3, 4, T, defaultp> make_mat3x4(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<4, 2, T, defaultp> make_mat4x2(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<4, 3, T, defaultp> make_mat4x3(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> make_mat4x4(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<2, 2, T, defaultp> make_mat2(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<3, 3, T, defaultp> make_mat3(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> make_mat4(T const * const ptr); + + /// Build a quaternion from a pointer. 
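+	///
+	/// Editorial usage sketch (hypothetical buffers, not upstream code). The
+	/// make_* helpers memcpy from a tightly packed array; matrices are read in
+	/// column-major order and quaternions in GLM's storage order (x, y, z, w by
+	/// default):
+	/// @code
+	/// float m[16] = {}; // 16 floats, column-major
+	/// glm::mat4 M = glm::make_mat4(m);
+	/// float q[4] = {0.0f, 0.0f, 0.0f, 1.0f}; // x, y, z, w -> identity
+	/// glm::quat R = glm::make_quat(q);
+	/// @endcode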
+ /// @see gtc_type_ptr + template + GLM_FUNC_DECL qua make_quat(T const * const ptr); + + /// @} +}//namespace glm + +#include "type_ptr.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/type_ptr.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/type_ptr.inl new file mode 100644 index 000000000000..26b20b52e9a9 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/type_ptr.inl @@ -0,0 +1,386 @@ +/// @ref gtc_type_ptr + +#include + +namespace glm +{ + /// @addtogroup gtc_type_ptr + /// @{ + + template + GLM_FUNC_QUALIFIER T const* value_ptr(vec<2, T, Q> const& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(vec<2, T, Q>& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T const * value_ptr(vec<3, T, Q> const& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(vec<3, T, Q>& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(vec<4, T, Q> const& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(vec<4, T, Q>& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 2, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 2, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 3, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 3, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 4, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<4, 4, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 3, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 3, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 2, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 2, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 4, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 4, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 2, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<4, 2, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 4, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 4, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 3, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T * value_ptr(mat<4, 3, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const * value_ptr(qua const& q) + { + return &(q[0]); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(qua& q) + { + return &(q[0]); + } + + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<1, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<2, T, Q> const& v) + { + return vec<1, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<3, T, Q> const& v) + { + return vec<1, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<4, T, Q> const& v) + { + return vec<1, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<2, T, Q> 
make_vec2(vec<1, T, Q> const& v) + { + return vec<2, T, Q>(v.x, static_cast(0)); + } + + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<2, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<3, T, Q> const& v) + { + return vec<2, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<4, T, Q> const& v) + { + return vec<2, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<1, T, Q> const& v) + { + return vec<3, T, Q>(v.x, static_cast(0), static_cast(0)); + } + + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<2, T, Q> const& v) + { + return vec<3, T, Q>(v.x, v.y, static_cast(0)); + } + + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<3, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<4, T, Q> const& v) + { + return vec<3, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<1, T, Q> const& v) + { + return vec<4, T, Q>(v.x, static_cast(0), static_cast(0), static_cast(1)); + } + + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<2, T, Q> const& v) + { + return vec<4, T, Q>(v.x, v.y, static_cast(0), static_cast(1)); + } + + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<3, T, Q> const& v) + { + return vec<4, T, Q>(v.x, v.y, v.z, static_cast(1)); + } + + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<4, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, defaultp> make_vec2(T const *const ptr) + { + vec<2, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(vec<2, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, defaultp> make_vec3(T const *const ptr) + { + vec<3, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(vec<3, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, defaultp> make_vec4(T const *const ptr) + { + vec<4, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(vec<4, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> make_mat2x2(T const *const ptr) + { + mat<2, 2, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<2, 2, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 3, T, defaultp> make_mat2x3(T const *const ptr) + { + mat<2, 3, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<2, 3, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 4, T, defaultp> make_mat2x4(T const *const ptr) + { + mat<2, 4, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<2, 4, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 2, T, defaultp> make_mat3x2(T const *const ptr) + { + mat<3, 2, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<3, 2, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> make_mat3x3(T const *const ptr) + { + mat<3, 3, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<3, 3, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 4, T, defaultp> make_mat3x4(T const *const ptr) + { + mat<3, 4, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<3, 4, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 2, T, defaultp> make_mat4x2(T const *const ptr) + { + mat<4, 2, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<4, 2, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 
3, T, defaultp> make_mat4x3(T const *const ptr) + { + mat<4, 3, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<4, 3, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> make_mat4x4(T const *const ptr) + { + mat<4, 4, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<4, 4, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> make_mat2(T const *const ptr) + { + return make_mat2x2(ptr); + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> make_mat3(T const *const ptr) + { + return make_mat3x3(ptr); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> make_mat4(T const *const ptr) + { + return make_mat4x4(ptr); + } + + template + GLM_FUNC_QUALIFIER qua make_quat(T const *const ptr) + { + qua Result; + memcpy(value_ptr(Result), ptr, sizeof(qua)); + return Result; + } + + /// @} +}//namespace glm + diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/ulp.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/ulp.hpp new file mode 100644 index 000000000000..7b918f0fa764 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/ulp.hpp @@ -0,0 +1,155 @@ +/// @ref gtc_ulp +/// @file glm/gtc/ulp.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_ulp GLM_GTC_ulp +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Allow the measurement of the accuracy of a function against a reference +/// implementation. This extension works on floating-point data and provide results +/// in ULP. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/_vectorize.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_ulp extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_ulp + /// @{ + + /// Return the next ULP value(s) after the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL genType next_float(genType x); + + /// Return the previous ULP value(s) before the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL genType prev_float(genType x); + + /// Return the value(s) ULP distance after the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL genType next_float(genType x, int ULPs); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL genType prev_float(genType x, int ULPs); + + /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. + /// + /// @see gtc_ulp + GLM_FUNC_DECL int float_distance(float x, float y); + + /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. + /// + /// @see gtc_ulp + GLM_FUNC_DECL int64 float_distance(double x, double y); + + /// Return the next ULP value(s) after the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec next_float(vec const& x); + + /// Return the value(s) ULP distance after the input value(s). 
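+	///
+	/// Editorial usage sketch (not upstream code):
+	/// @code
+	/// float x = 1.0f;
+	/// float y = glm::next_float(x);          // smallest float strictly above x
+	/// int   d = glm::float_distance(x, y);   // 1 ULP
+	/// @endcode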
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec next_float(vec const& x, int ULPs); + + /// Return the value(s) ULP distance after the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec next_float(vec const& x, vec const& ULPs); + + /// Return the previous ULP value(s) before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec prev_float(vec const& x); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec prev_float(vec const& x, int ULPs); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec prev_float(vec const& x, vec const& ULPs); + + /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec float_distance(vec const& x, vec const& y); + + /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. 
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see gtc_ulp
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL vec<L, int64, Q> float_distance(vec<L, double, Q> const& x, vec<L, double, Q> const& y);
+
+	/// @}
+}//namespace glm
+
+#include "ulp.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/ulp.inl b/thirdparty/manifold/thirdparty/glm/glm/gtc/ulp.inl
new file mode 100644
index 000000000000..836c84b4a250
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/ulp.inl
@@ -0,0 +1,173 @@
+/// @ref gtc_ulp
+
+#include "../ext/scalar_ulp.hpp"
+
+namespace glm
+{
+	template<>
+	GLM_FUNC_QUALIFIER float next_float(float x)
+	{
+#	if GLM_HAS_CXX11_STL
+		return std::nextafter(x, std::numeric_limits<float>::max());
+#	elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+		return detail::nextafterf(x, FLT_MAX);
+#	elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+		return __builtin_nextafterf(x, FLT_MAX);
+#	else
+		return nextafterf(x, FLT_MAX);
+#	endif
+	}
+
+	template<>
+	GLM_FUNC_QUALIFIER double next_float(double x)
+	{
+#	if GLM_HAS_CXX11_STL
+		return std::nextafter(x, std::numeric_limits<double>::max());
+#	elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+		return detail::nextafter(x, std::numeric_limits<double>::max());
+#	elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+		return __builtin_nextafter(x, DBL_MAX);
+#	else
+		return nextafter(x, DBL_MAX);
+#	endif
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER T next_float(T x, int ULPs)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'next_float' only accept floating-point input");
+		assert(ULPs >= 0);
+
+		T temp = x;
+		for (int i = 0; i < ULPs; ++i)
+			temp = next_float(temp);
+		return temp;
+	}
+
+	GLM_FUNC_QUALIFIER float prev_float(float x)
+	{
+#	if GLM_HAS_CXX11_STL
+		return std::nextafter(x, std::numeric_limits<float>::min());
+#	elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+		return detail::nextafterf(x, FLT_MIN);
+#	elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+		return __builtin_nextafterf(x, FLT_MIN);
+#	else
+		return nextafterf(x, FLT_MIN);
+#	endif
+	}
+
+	GLM_FUNC_QUALIFIER double prev_float(double x)
+	{
+#	if GLM_HAS_CXX11_STL
+		return std::nextafter(x, std::numeric_limits<double>::min());
+#	elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
+		return _nextafter(x, DBL_MIN);
+#	elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+		return __builtin_nextafter(x, DBL_MIN);
+#	else
+		return nextafter(x, DBL_MIN);
+#	endif
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER T prev_float(T x, int ULPs)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'prev_float' only accept floating-point input");
+		assert(ULPs >= 0);
+
+		T temp = x;
+		for (int i = 0; i < ULPs; ++i)
+			temp = prev_float(temp);
+		return temp;
+	}
+
+	GLM_FUNC_QUALIFIER int float_distance(float x, float y)
+	{
+		detail::float_t<float> const a(x);
+		detail::float_t<float> const b(y);
+
+		return abs(a.i - b.i);
+	}
+
+	GLM_FUNC_QUALIFIER int64 float_distance(double x, double y)
+	{
+		detail::float_t<double> const a(x);
+		detail::float_t<double> const b(y);
+
+		return abs(a.i - b.i);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> next_float(vec<L, T, Q> const& x)
+	{
+		vec<L, T, Q> Result;
+		for (length_t i = 0, n = Result.length(); i < n; ++i)
+			Result[i] = next_float(x[i]);
+		return Result;
+	}
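+
+	// Editorial note (not upstream code): the vector overloads below apply the
+	// scalar ULP functions component-wise, e.g.
+	//   glm::vec2 v(1.0f, 2.0f);
+	//   glm::vec2 w = glm::next_float(v, 4); // each component moved 4 ULPs up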
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> next_float(vec<L, T, Q> const& x, int ULPs)
+	{
+		vec<L, T, Q> Result;
+		for (length_t i = 0, n = Result.length(); i < n; ++i)
+			Result[i] = next_float(x[i], ULPs);
+		return Result;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> next_float(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs)
+	{
+		vec<L, T, Q> Result;
+		for (length_t i = 0, n = Result.length(); i < n; ++i)
+			Result[i] = next_float(x[i], ULPs[i]);
+		return Result;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> prev_float(vec<L, T, Q> const& x)
+	{
+		vec<L, T, Q> Result;
+		for (length_t i = 0, n = Result.length(); i < n; ++i)
+			Result[i] = prev_float(x[i]);
+		return Result;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> prev_float(vec<L, T, Q> const& x, int ULPs)
+	{
+		vec<L, T, Q> Result;
+		for (length_t i = 0, n = Result.length(); i < n; ++i)
+			Result[i] = prev_float(x[i], ULPs);
+		return Result;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> prev_float(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs)
+	{
+		vec<L, T, Q> Result;
+		for (length_t i = 0, n = Result.length(); i < n; ++i)
+			Result[i] = prev_float(x[i], ULPs[i]);
+		return Result;
+	}
+
+	template<length_t L, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, int, Q> float_distance(vec<L, float, Q> const& x, vec<L, float, Q> const& y)
+	{
+		vec<L, int, Q> Result;
+		for (length_t i = 0, n = Result.length(); i < n; ++i)
+			Result[i] = float_distance(x[i], y[i]);
+		return Result;
+	}
+
+	template<length_t L, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, int64, Q> float_distance(vec<L, double, Q> const& x, vec<L, double, Q> const& y)
+	{
+		vec<L, int64, Q> Result;
+		for (length_t i = 0, n = Result.length(); i < n; ++i)
+			Result[i] = float_distance(x[i], y[i]);
+		return Result;
+	}
+}//namespace glm
+
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtc/vec1.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtc/vec1.hpp
new file mode 100644
index 000000000000..63697a215750
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtc/vec1.hpp
@@ -0,0 +1,30 @@
+/// @ref gtc_vec1
+/// @file glm/gtc/vec1.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtc_vec1 GLM_GTC_vec1
+/// @ingroup gtc
+///
+/// Include <glm/gtc/vec1.hpp> to use the features of this extension.
+///
+/// Add vec1, ivec1, uvec1 and bvec1 types.
+
+#pragma once
+
+// Dependency:
+#include "../ext/vector_bool1.hpp"
+#include "../ext/vector_bool1_precision.hpp"
+#include "../ext/vector_float1.hpp"
+#include "../ext/vector_float1_precision.hpp"
+#include "../ext/vector_double1.hpp"
+#include "../ext/vector_double1_precision.hpp"
+#include "../ext/vector_int1.hpp"
+#include "../ext/vector_int1_sized.hpp"
+#include "../ext/vector_uint1.hpp"
+#include "../ext/vector_uint1_sized.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTC_vec1 extension included")
+#endif
+
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/associated_min_max.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/associated_min_max.hpp
new file mode 100644
index 000000000000..4c036add2186
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/associated_min_max.hpp
@@ -0,0 +1,207 @@
+/// @ref gtx_associated_min_max
+/// @file glm/gtx/associated_min_max.hpp
+///
+/// @see core (dependence)
+/// @see gtx_extented_min_max (dependence)
+///
+/// @defgroup gtx_associated_min_max GLM_GTX_associated_min_max
+/// @ingroup gtx
+///
+/// Include <glm/gtx/associated_min_max.hpp> to use the features of this extension.
+///
+/// @brief Min and max functions that return associated values not the compared ones.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_associated_min_max is an experimental extension and may change in the future.
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_associated_min_max extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_associated_min_max + /// @{ + + /// Minimum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMin(T x, U a, T y, U b); + + /// Minimum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + vec const& x, vec const& a, + vec const& y, vec const& b); + + /// Minimum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + T x, const vec& a, + T y, const vec& b); + + /// Minimum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + vec const& x, U a, + vec const& y, U b); + + /// Minimum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMin( + T x, U a, + T y, U b, + T z, U c); + + /// Minimum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c); + + /// Minimum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMin( + T x, U a, + T y, U b, + T z, U c, + T w, U d); + + /// Minimum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c, + vec const& w, vec const& d); + + /// Minimum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c, + T w, vec const& d); + + /// Minimum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c, + vec const& w, U d); + + /// Maximum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMax(T x, U a, T y, U b); + + /// Maximum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, vec const& a, + vec const& y, vec const& b); + + /// Maximum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + T x, vec const& a, + T y, vec const& b); + + /// Maximum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, U a, + vec const& y, U b); + + /// Maximum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U 
associatedMax( + T x, U a, + T y, U b, + T z, U c); + + /// Maximum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c); + + /// Maximum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c); + + /// Maximum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c); + + /// Maximum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMax( + T x, U a, + T y, U b, + T z, U c, + T w, U d); + + /// Maximum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c, + vec const& w, vec const& d); + + /// Maximum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c, + T w, vec const& d); + + /// Maximum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c, + vec const& w, U d); + + /// @} +} //namespace glm + +#include "associated_min_max.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/associated_min_max.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/associated_min_max.inl new file mode 100644 index 000000000000..f09f5bb74c25 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/associated_min_max.inl @@ -0,0 +1,354 @@ +/// @ref gtx_associated_min_max + +namespace glm{ + +// Min comparison between 2 variables +template +GLM_FUNC_QUALIFIER U associatedMin(T x, U a, T y, U b) +{ + return x < y ? a : b; +} + +template +GLM_FUNC_QUALIFIER vec associatedMin +( + vec const& x, vec const& a, + vec const& y, vec const& b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] < y[i] ? a[i] : b[i]; + return Result; +} + +template +GLM_FUNC_QUALIFIER vec associatedMin +( + T x, const vec& a, + T y, const vec& b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x < y ? a[i] : b[i]; + return Result; +} + +template +GLM_FUNC_QUALIFIER vec associatedMin +( + vec const& x, U a, + vec const& y, U b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] < y[i] ? a : b; + return Result; +} + +// Min comparison between 3 variables +template +GLM_FUNC_QUALIFIER U associatedMin +( + T x, U a, + T y, U b, + T z, U c +) +{ + U Result = x < y ? (x < z ? a : c) : (y < z ? b : c); + return Result; +} + +template +GLM_FUNC_QUALIFIER vec associatedMin +( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] < y[i] ? (x[i] < z[i] ? a[i] : c[i]) : (y[i] < z[i] ? 
b[i] : c[i]); + return Result; +} + +// Min comparison between 4 variables +template +GLM_FUNC_QUALIFIER U associatedMin +( + T x, U a, + T y, U b, + T z, U c, + T w, U d +) +{ + T Test1 = min(x, y); + T Test2 = min(z, w); + U Result1 = x < y ? a : b; + U Result2 = z < w ? c : d; + U Result = Test1 < Test2 ? Result1 : Result2; + return Result; +} + +// Min comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMin +( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c, + vec const& w, vec const& d +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + T Test1 = min(x[i], y[i]); + T Test2 = min(z[i], w[i]); + U Result1 = x[i] < y[i] ? a[i] : b[i]; + U Result2 = z[i] < w[i] ? c[i] : d[i]; + Result[i] = Test1 < Test2 ? Result1 : Result2; + } + return Result; +} + +// Min comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMin +( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c, + T w, vec const& d +) +{ + T Test1 = min(x, y); + T Test2 = min(z, w); + + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + U Result1 = x < y ? a[i] : b[i]; + U Result2 = z < w ? c[i] : d[i]; + Result[i] = Test1 < Test2 ? Result1 : Result2; + } + return Result; +} + +// Min comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMin +( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c, + vec const& w, U d +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + T Test1 = min(x[i], y[i]); + T Test2 = min(z[i], w[i]); + U Result1 = x[i] < y[i] ? a : b; + U Result2 = z[i] < w[i] ? c : d; + Result[i] = Test1 < Test2 ? Result1 : Result2; + } + return Result; +} + +// Max comparison between 2 variables +template +GLM_FUNC_QUALIFIER U associatedMax(T x, U a, T y, U b) +{ + return x > y ? a : b; +} + +// Max comparison between 2 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, vec const& a, + vec const& y, vec const& b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] > y[i] ? a[i] : b[i]; + return Result; +} + +// Max comparison between 2 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + T x, vec const& a, + T y, vec const& b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x > y ? a[i] : b[i]; + return Result; +} + +// Max comparison between 2 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, U a, + vec const& y, U b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] > y[i] ? a : b; + return Result; +} + +// Max comparison between 3 variables +template +GLM_FUNC_QUALIFIER U associatedMax +( + T x, U a, + T y, U b, + T z, U c +) +{ + U Result = x > y ? (x > z ? a : c) : (y > z ? b : c); + return Result; +} + +// Max comparison between 3 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] > y[i] ? (x[i] > z[i] ? a[i] : c[i]) : (y[i] > z[i] ? b[i] : c[i]); + return Result; +} + +// Max comparison between 3 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x > y ? (x > z ? 
a[i] : c[i]) : (y > z ? b[i] : c[i]); + return Result; +} + +// Max comparison between 3 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] > y[i] ? (x[i] > z[i] ? a : c) : (y[i] > z[i] ? b : c); + return Result; +} + +// Max comparison between 4 variables +template +GLM_FUNC_QUALIFIER U associatedMax +( + T x, U a, + T y, U b, + T z, U c, + T w, U d +) +{ + T Test1 = max(x, y); + T Test2 = max(z, w); + U Result1 = x > y ? a : b; + U Result2 = z > w ? c : d; + U Result = Test1 > Test2 ? Result1 : Result2; + return Result; +} + +// Max comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c, + vec const& w, vec const& d +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + T Test1 = max(x[i], y[i]); + T Test2 = max(z[i], w[i]); + U Result1 = x[i] > y[i] ? a[i] : b[i]; + U Result2 = z[i] > w[i] ? c[i] : d[i]; + Result[i] = Test1 > Test2 ? Result1 : Result2; + } + return Result; +} + +// Max comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c, + T w, vec const& d +) +{ + T Test1 = max(x, y); + T Test2 = max(z, w); + + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + U Result1 = x > y ? a[i] : b[i]; + U Result2 = z > w ? c[i] : d[i]; + Result[i] = Test1 > Test2 ? Result1 : Result2; + } + return Result; +} + +// Max comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c, + vec const& w, U d +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + T Test1 = max(x[i], y[i]); + T Test2 = max(z[i], w[i]); + U Result1 = x[i] > y[i] ? a : b; + U Result2 = z[i] > w[i] ? c : d; + Result[i] = Test1 > Test2 ? Result1 : Result2; + } + return Result; +} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/bit.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/bit.hpp new file mode 100644 index 000000000000..60a7aef1b463 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/bit.hpp @@ -0,0 +1,98 @@ +/// @ref gtx_bit +/// @file glm/gtx/bit.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_bit GLM_GTX_bit +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Allow to perform bit operations on integer values + +#pragma once + +// Dependencies +#include "../gtc/bitfield.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_bit is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_bit extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_bit + /// @{ + + /// @see gtx_bit + template + GLM_FUNC_DECL genIUType highestBitValue(genIUType Value); + + /// @see gtx_bit + template + GLM_FUNC_DECL genIUType lowestBitValue(genIUType Value); + + /// Find the highest bit set to 1 in a integer variable and return its value. + /// + /// @see gtx_bit + template + GLM_FUNC_DECL vec highestBitValue(vec const& value); + + /// Return the power of two number which value is just higher the input value. 
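+	/// For example, powerOfTwoAbove(5) returns 8 (an illustrative value, not from the GLM docs).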
+ /// Deprecated, use ceilPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoAbove(genIUType Value); + + /// Return the power of two number which value is just higher the input value. + /// Deprecated, use ceilPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoAbove(vec const& value); + + /// Return the power of two number which value is just lower the input value. + /// Deprecated, use floorPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoBelow(genIUType Value); + + /// Return the power of two number which value is just lower the input value. + /// Deprecated, use floorPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoBelow(vec const& value); + + /// Return the power of two number which value is the closet to the input value. + /// Deprecated, use roundPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoNearest(genIUType Value); + + /// Return the power of two number which value is the closet to the input value. + /// Deprecated, use roundPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoNearest(vec const& value); + + /// @} +} //namespace glm + + +#include "bit.inl" + diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/bit.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/bit.inl new file mode 100644 index 000000000000..621b6262406d --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/bit.inl @@ -0,0 +1,92 @@ +/// @ref gtx_bit + +namespace glm +{ + /////////////////// + // highestBitValue + + template + GLM_FUNC_QUALIFIER genIUType highestBitValue(genIUType Value) + { + genIUType tmp = Value; + genIUType result = genIUType(0); + while(tmp) + { + result = (tmp & (~tmp + 1)); // grab lowest bit + tmp &= ~result; // clear lowest bit + } + return result; + } + + template + GLM_FUNC_QUALIFIER vec highestBitValue(vec const& v) + { + return detail::functor1::call(highestBitValue, v); + } + + /////////////////// + // lowestBitValue + + template + GLM_FUNC_QUALIFIER genIUType lowestBitValue(genIUType Value) + { + return (Value & (~Value + 1)); + } + + template + GLM_FUNC_QUALIFIER vec lowestBitValue(vec const& v) + { + return detail::functor1::call(lowestBitValue, v); + } + + /////////////////// + // powerOfTwoAbove + + template + GLM_FUNC_QUALIFIER genType powerOfTwoAbove(genType value) + { + return isPowerOfTwo(value) ? value : highestBitValue(value) << 1; + } + + template + GLM_FUNC_QUALIFIER vec powerOfTwoAbove(vec const& v) + { + return detail::functor1::call(powerOfTwoAbove, v); + } + + /////////////////// + // powerOfTwoBelow + + template + GLM_FUNC_QUALIFIER genType powerOfTwoBelow(genType value) + { + return isPowerOfTwo(value) ? 
value : highestBitValue(value);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> powerOfTwoBelow(vec<L, T, Q> const& v)
+	{
+		return detail::functor1<vec, L, T, T, Q>::call(powerOfTwoBelow, v);
+	}
+
+	/////////////////////
+	// powerOfTwoNearest
+
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType powerOfTwoNearest(genType value)
+	{
+		if(isPowerOfTwo(value))
+			return value;
+
+		genType const prev = highestBitValue(value);
+		genType const next = prev << 1;
+		return (next - value) < (value - prev) ? next : prev;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> powerOfTwoNearest(vec<L, T, Q> const& v)
+	{
+		return detail::functor1<vec, L, T, T, Q>::call(powerOfTwoNearest, v);
+	}
+
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/closest_point.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/closest_point.hpp
new file mode 100644
index 000000000000..de6dbbff9447
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/closest_point.hpp
@@ -0,0 +1,49 @@
+/// @ref gtx_closest_point
+/// @file glm/gtx/closest_point.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_closest_point GLM_GTX_closest_point
+/// @ingroup gtx
+///
+/// Include <glm/gtx/closest_point.hpp> to use the features of this extension.
+///
+/// Find the point on a straight line which is the closest to a point.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_closest_point is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_closest_point extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_closest_point
+	/// @{
+
+	/// Find the point on a straight line which is the closest to a point.
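+	///
+	/// A usage sketch (hypothetical values): the closest point to (2, 5, 0) on the
+	/// segment from the origin to (4, 0, 0) is (2, 0, 0); note the result is clamped
+	/// to the segment between a and b.
+	/// @code
+	/// glm::vec3 p = glm::closestPointOnLine(
+	/// 	glm::vec3(2.0f, 5.0f, 0.0f),
+	/// 	glm::vec3(0.0f), glm::vec3(4.0f, 0.0f, 0.0f));
+	/// @endcode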
+	/// @see gtx_closest_point
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> closestPointOnLine(
+		vec<3, T, Q> const& point,
+		vec<3, T, Q> const& a,
+		vec<3, T, Q> const& b);
+
+	/// 2d lines work as well
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<2, T, Q> closestPointOnLine(
+		vec<2, T, Q> const& point,
+		vec<2, T, Q> const& a,
+		vec<2, T, Q> const& b);
+
+	/// @}
+}// namespace glm
+
+#include "closest_point.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/closest_point.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/closest_point.inl
new file mode 100644
index 000000000000..0a39b042b88c
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/closest_point.inl
@@ -0,0 +1,45 @@
+/// @ref gtx_closest_point
+
+namespace glm
+{
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, T, Q> closestPointOnLine
+	(
+		vec<3, T, Q> const& point,
+		vec<3, T, Q> const& a,
+		vec<3, T, Q> const& b
+	)
+	{
+		T LineLength = distance(a, b);
+		vec<3, T, Q> Vector = point - a;
+		vec<3, T, Q> LineDirection = (b - a) / LineLength;
+
+		// Project Vector to LineDirection to get the distance of point from a
+		T Distance = dot(Vector, LineDirection);
+
+		if(Distance <= T(0)) return a;
+		if(Distance >= LineLength) return b;
+		return a + LineDirection * Distance;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<2, T, Q> closestPointOnLine
+	(
+		vec<2, T, Q> const& point,
+		vec<2, T, Q> const& a,
+		vec<2, T, Q> const& b
+	)
+	{
+		T LineLength = distance(a, b);
+		vec<2, T, Q> Vector = point - a;
+		vec<2, T, Q> LineDirection = (b - a) / LineLength;
+
+		// Project Vector to LineDirection to get the distance of point from a
+		T Distance = dot(Vector, LineDirection);
+
+		if(Distance <= T(0)) return a;
+		if(Distance >= LineLength) return b;
+		return a + LineDirection * Distance;
+	}
+
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/color_encoding.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_encoding.hpp
new file mode 100644
index 000000000000..96ded2a2770f
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_encoding.hpp
@@ -0,0 +1,54 @@
+/// @ref gtx_color_encoding
+/// @file glm/gtx/color_encoding.hpp
+///
+/// @see core (dependence)
+/// @see gtx_color_encoding (dependence)
+///
+/// @defgroup gtx_color_encoding GLM_GTX_color_encoding
+/// @ingroup gtx
+///
+/// Include <glm/gtx/color_encoding.hpp> to use the features of this extension.
+///
+/// @brief Allow to perform color conversions between linear sRGB and CIE XYZ color spaces
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+#include "../vec3.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTC_color_encoding is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTC_color_encoding extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_color_encoding
+	/// @{
+
+	/// Convert a linear sRGB color to D65 XYZ.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB);
+
+	/// Convert a linear sRGB color to D50 XYZ.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB);
+
+	/// Convert a D65 XYZ color to linear sRGB.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ);
+
+	/// Convert a D65 XYZ color to D50 XYZ.
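+	///
+	/// A usage sketch chaining the conversions above (the input color is illustrative):
+	/// @code
+	/// glm::vec3 xyzD65 = glm::convertLinearSRGBToD65XYZ(glm::vec3(0.2f, 0.5f, 0.8f));
+	/// glm::vec3 xyzD50 = glm::convertD65XYZToD50XYZ(xyzD65);
+	/// @endcode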
+ template + GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ); + + /// @} +} //namespace glm + +#include "color_encoding.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/color_encoding.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_encoding.inl new file mode 100644 index 000000000000..e50fa3efa42c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_encoding.inl @@ -0,0 +1,45 @@ +/// @ref gtx_color_encoding + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB) + { + vec<3, T, Q> const M(0.490f, 0.17697f, 0.2f); + vec<3, T, Q> const N(0.31f, 0.8124f, 0.01063f); + vec<3, T, Q> const O(0.490f, 0.01f, 0.99f); + + return (M * ColorLinearSRGB + N * ColorLinearSRGB + O * ColorLinearSRGB) * static_cast(5.650675255693055f); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB) + { + vec<3, T, Q> const M(0.436030342570117f, 0.222438466210245f, 0.013897440074263f); + vec<3, T, Q> const N(0.385101860087134f, 0.716942745571917f, 0.097076381494207f); + vec<3, T, Q> const O(0.143067806654203f, 0.060618777416563f, 0.713926257896652f); + + return M * ColorLinearSRGB + N * ColorLinearSRGB + O * ColorLinearSRGB; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ) + { + vec<3, T, Q> const M(0.41847f, -0.091169f, 0.0009209f); + vec<3, T, Q> const N(-0.15866f, 0.25243f, 0.015708f); + vec<3, T, Q> const O(0.0009209f, -0.0025498f, 0.1786f); + + return M * ColorD65XYZ + N * ColorD65XYZ + O * ColorD65XYZ; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ) + { + vec<3, T, Q> const M(+1.047844353856414f, +0.029549007606644f, -0.009250984365223f); + vec<3, T, Q> const N(+0.022898981050086f, +0.990508028941971f, +0.015072338237051f); + vec<3, T, Q> const O(-0.050206647741605f, -0.017074711360960f, +0.751717835079977f); + + return M * ColorD65XYZ + N * ColorD65XYZ + O * ColorD65XYZ; + } + +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space.hpp new file mode 100644 index 000000000000..a63439214908 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space.hpp @@ -0,0 +1,72 @@ +/// @ref gtx_color_space +/// @file glm/gtx/color_space.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_color_space GLM_GTX_color_space +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Related to RGB to HSV conversions and operations. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_color_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_color_space extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_color_space + /// @{ + + /// Converts a color from HSV color space to its color in RGB color space. + /// @see gtx_color_space + template + GLM_FUNC_DECL vec<3, T, Q> rgbColor( + vec<3, T, Q> const& hsvValue); + + /// Converts a color from RGB color space to its color in HSV color space. 
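+	///
+	/// A round-trip sketch (pure red; hue is in degrees, channels in [0, 1]):
+	/// @code
+	/// glm::vec3 hsv = glm::hsvColor(glm::vec3(1.0f, 0.0f, 0.0f)); // (0, 1, 1)
+	/// glm::vec3 rgb = glm::rgbColor(hsv); // back to (1, 0, 0)
+	/// @endcode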
+ /// @see gtx_color_space + template + GLM_FUNC_DECL vec<3, T, Q> hsvColor( + vec<3, T, Q> const& rgbValue); + + /// Build a saturation matrix. + /// @see gtx_color_space + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> saturation( + T const s); + + /// Modify the saturation of a color. + /// @see gtx_color_space + template + GLM_FUNC_DECL vec<3, T, Q> saturation( + T const s, + vec<3, T, Q> const& color); + + /// Modify the saturation of a color. + /// @see gtx_color_space + template + GLM_FUNC_DECL vec<4, T, Q> saturation( + T const s, + vec<4, T, Q> const& color); + + /// Compute color luminosity associating ratios (0.33, 0.59, 0.11) to RGB canals. + /// @see gtx_color_space + template + GLM_FUNC_DECL T luminosity( + vec<3, T, Q> const& color); + + /// @} +}//namespace glm + +#include "color_space.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space.inl new file mode 100644 index 000000000000..0a7059fabb7b --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space.inl @@ -0,0 +1,141 @@ +/// @ref gtx_color_space + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rgbColor(const vec<3, T, Q>& hsvColor) + { + vec<3, T, Q> hsv = hsvColor; + vec<3, T, Q> rgbColor; + + if(hsv.y == static_cast(0)) + // achromatic (grey) + rgbColor = vec<3, T, Q>(hsv.z); + else + { + T sector = floor(hsv.x * (T(1) / T(60))); + T frac = (hsv.x * (T(1) / T(60))) - sector; + // factorial part of h + T o = hsv.z * (T(1) - hsv.y); + T p = hsv.z * (T(1) - hsv.y * frac); + T q = hsv.z * (T(1) - hsv.y * (T(1) - frac)); + + switch(int(sector)) + { + default: + case 0: + rgbColor.r = hsv.z; + rgbColor.g = q; + rgbColor.b = o; + break; + case 1: + rgbColor.r = p; + rgbColor.g = hsv.z; + rgbColor.b = o; + break; + case 2: + rgbColor.r = o; + rgbColor.g = hsv.z; + rgbColor.b = q; + break; + case 3: + rgbColor.r = o; + rgbColor.g = p; + rgbColor.b = hsv.z; + break; + case 4: + rgbColor.r = q; + rgbColor.g = o; + rgbColor.b = hsv.z; + break; + case 5: + rgbColor.r = hsv.z; + rgbColor.g = o; + rgbColor.b = p; + break; + } + } + + return rgbColor; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> hsvColor(const vec<3, T, Q>& rgbColor) + { + vec<3, T, Q> hsv = rgbColor; + T Min = min(min(rgbColor.r, rgbColor.g), rgbColor.b); + T Max = max(max(rgbColor.r, rgbColor.g), rgbColor.b); + T Delta = Max - Min; + + hsv.z = Max; + + if(Max != static_cast(0)) + { + hsv.y = Delta / hsv.z; + T h = static_cast(0); + + if(rgbColor.r == Max) + // between yellow & magenta + h = static_cast(0) + T(60) * (rgbColor.g - rgbColor.b) / Delta; + else if(rgbColor.g == Max) + // between cyan & yellow + h = static_cast(120) + T(60) * (rgbColor.b - rgbColor.r) / Delta; + else + // between magenta & cyan + h = static_cast(240) + T(60) * (rgbColor.r - rgbColor.g) / Delta; + + if(h < T(0)) + hsv.x = h + T(360); + else + hsv.x = h; + } + else + { + // If r = g = b = 0 then s = 0, h is undefined + hsv.y = static_cast(0); + hsv.x = static_cast(0); + } + + return hsv; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> saturation(T const s) + { + vec<3, T, defaultp> rgbw = vec<3, T, defaultp>(T(0.2126), T(0.7152), T(0.0722)); + + vec<3, T, defaultp> const col((T(1) - s) * rgbw); + + mat<4, 4, T, defaultp> result(T(1)); + result[0][0] = col.x + s; + result[0][1] = col.x; + result[0][2] = col.x; + result[1][0] = col.y; + result[1][1] = col.y + s; + result[1][2] = col.y; + result[2][0] = col.z; + result[2][1] = col.z; + result[2][2] = col.z + s; 
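+		// With s == 0 each column is filled with its channel's Rec. 709 luma weight
+		// (0.2126, 0.7152, 0.0722), so the matrix maps any color to grayscale; with
+		// s == 1 it is the identity.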
+ + return result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> saturation(const T s, const vec<3, T, Q>& color) + { + return vec<3, T, Q>(saturation(s) * vec<4, T, Q>(color, T(0))); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> saturation(const T s, const vec<4, T, Q>& color) + { + return saturation(s) * color; + } + + template + GLM_FUNC_QUALIFIER T luminosity(const vec<3, T, Q>& color) + { + const vec<3, T, Q> tmp = vec<3, T, Q>(0.33, 0.59, 0.11); + return dot(color, tmp); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space_YCoCg.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space_YCoCg.hpp new file mode 100644 index 000000000000..dd2b771693f6 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space_YCoCg.hpp @@ -0,0 +1,60 @@ +/// @ref gtx_color_space_YCoCg +/// @file glm/gtx/color_space_YCoCg.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_color_space_YCoCg GLM_GTX_color_space_YCoCg +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// RGB to YCoCg conversions and operations + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_color_space_YCoCg is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_color_space_YCoCg extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_color_space_YCoCg + /// @{ + + /// Convert a color from RGB color space to YCoCg color space. + /// @see gtx_color_space_YCoCg + template + GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCg( + vec<3, T, Q> const& rgbColor); + + /// Convert a color from YCoCg color space to RGB color space. + /// @see gtx_color_space_YCoCg + template + GLM_FUNC_DECL vec<3, T, Q> YCoCg2rgb( + vec<3, T, Q> const& YCoCgColor); + + /// Convert a color from RGB color space to YCoCgR color space. + /// @see "YCoCg-R: A Color Space with RGB Reversibility and Low Dynamic Range" + /// @see gtx_color_space_YCoCg + template + GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCgR( + vec<3, T, Q> const& rgbColor); + + /// Convert a color from YCoCgR color space to RGB color space. 
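+	///
+	/// A round-trip sketch (the input color is illustrative):
+	/// @code
+	/// glm::vec3 ycocgr = glm::rgb2YCoCgR(glm::vec3(0.25f, 0.5f, 0.75f));
+	/// glm::vec3 rgb = glm::YCoCgR2rgb(ycocgr); // recovers the input exactly
+	/// @endcode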
+ /// @see "YCoCg-R: A Color Space with RGB Reversibility and Low Dynamic Range" + /// @see gtx_color_space_YCoCg + template + GLM_FUNC_DECL vec<3, T, Q> YCoCgR2rgb( + vec<3, T, Q> const& YCoCgColor); + + /// @} +}//namespace glm + +#include "color_space_YCoCg.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space_YCoCg.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space_YCoCg.inl new file mode 100644 index 000000000000..83ba857c08bd --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/color_space_YCoCg.inl @@ -0,0 +1,107 @@ +/// @ref gtx_color_space_YCoCg + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCg + ( + vec<3, T, Q> const& rgbColor + ) + { + vec<3, T, Q> result; + result.x/*Y */ = rgbColor.r / T(4) + rgbColor.g / T(2) + rgbColor.b / T(4); + result.y/*Co*/ = rgbColor.r / T(2) + rgbColor.g * T(0) - rgbColor.b / T(2); + result.z/*Cg*/ = - rgbColor.r / T(4) + rgbColor.g / T(2) - rgbColor.b / T(4); + return result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCg2rgb + ( + vec<3, T, Q> const& YCoCgColor + ) + { + vec<3, T, Q> result; + result.r = YCoCgColor.x + YCoCgColor.y - YCoCgColor.z; + result.g = YCoCgColor.x + YCoCgColor.z; + result.b = YCoCgColor.x - YCoCgColor.y - YCoCgColor.z; + return result; + } + + template + class compute_YCoCgR { + public: + static GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR + ( + vec<3, T, Q> const& rgbColor + ) + { + vec<3, T, Q> result; + result.x/*Y */ = rgbColor.g * static_cast(0.5) + (rgbColor.r + rgbColor.b) * static_cast(0.25); + result.y/*Co*/ = rgbColor.r - rgbColor.b; + result.z/*Cg*/ = rgbColor.g - (rgbColor.r + rgbColor.b) * static_cast(0.5); + return result; + } + + static GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb + ( + vec<3, T, Q> const& YCoCgRColor + ) + { + vec<3, T, Q> result; + T tmp = YCoCgRColor.x - (YCoCgRColor.z * static_cast(0.5)); + result.g = YCoCgRColor.z + tmp; + result.b = tmp - (YCoCgRColor.y * static_cast(0.5)); + result.r = result.b + YCoCgRColor.y; + return result; + } + }; + + template + class compute_YCoCgR { + public: + static GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR + ( + vec<3, T, Q> const& rgbColor + ) + { + vec<3, T, Q> result; + result.y/*Co*/ = rgbColor.r - rgbColor.b; + T tmp = rgbColor.b + (result.y >> 1); + result.z/*Cg*/ = rgbColor.g - tmp; + result.x/*Y */ = tmp + (result.z >> 1); + return result; + } + + static GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb + ( + vec<3, T, Q> const& YCoCgRColor + ) + { + vec<3, T, Q> result; + T tmp = YCoCgRColor.x - (YCoCgRColor.z >> 1); + result.g = YCoCgRColor.z + tmp; + result.b = tmp - (YCoCgRColor.y >> 1); + result.r = result.b + YCoCgRColor.y; + return result; + } + }; + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR + ( + vec<3, T, Q> const& rgbColor + ) + { + return compute_YCoCgR::is_integer>::rgb2YCoCgR(rgbColor); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb + ( + vec<3, T, Q> const& YCoCgRColor + ) + { + return compute_YCoCgR::is_integer>::YCoCgR2rgb(YCoCgRColor); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/common.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/common.hpp new file mode 100644 index 000000000000..254ada2d7695 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/common.hpp @@ -0,0 +1,76 @@ +/// @ref gtx_common +/// @file glm/gtx/common.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_common GLM_GTX_common +/// @ingroup gtx +/// +/// Include to use the features of this extension. 
+///
+/// @brief Provide functions to increase the compatibility with Cg and HLSL languages
+
+#pragma once
+
+// Dependencies:
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include "../gtc/vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_common is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_common extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_common
+	/// @{
+
+	/// Returns true if x is a denormalized number
+	/// Numbers whose absolute value is too small to be represented in the normal format are represented in an alternate, denormalized format.
+	/// This format is less precise but can represent values closer to zero.
+	///
+	/// @tparam genType Floating-point scalar or vector types.
+	///
+	/// @see GLSL isnan man page
+	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
+	template<typename genType>
+	GLM_FUNC_DECL typename genType::bool_type isdenormal(genType const& x);
+
+	/// Similar to 'mod' but with a different rounding and integer support.
+	/// Returns 'x - y * trunc(x/y)' instead of 'x - y * floor(x/y)'
+	///
+	/// @see GLSL mod vs HLSL fmod
+	/// @see GLSL mod man page
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> fmod(vec<L, T, Q> const& v);
+
+	/// Returns whether vector components values are within an interval. An open interval excludes its endpoints, and is denoted with parentheses.
+	///
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or integer scalar types
+	/// @tparam Q Value from qualifier enum
+	///
+	/// @see ext_vector_relational
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, bool, Q> openBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
+
+	/// Returns whether vector components values are within an interval. A closed interval includes its endpoints, and is denoted with square brackets.
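+	///
+	/// A usage sketch (hypothetical values; the endpoints count as inside):
+	/// @code
+	/// glm::bvec3 ok = glm::closeBounded(glm::vec3(1.0f), glm::vec3(0.0f), glm::vec3(1.0f)); // all true
+	/// @endcode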
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_relational + template + GLM_FUNC_DECL vec closeBounded(vec const& Value, vec const& Min, vec const& Max); + + /// @} +}//namespace glm + +#include "common.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/common.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/common.inl new file mode 100644 index 000000000000..4575b20752f8 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/common.inl @@ -0,0 +1,125 @@ +/// @ref gtx_common + +#include +#include "../gtc/epsilon.hpp" +#include "../gtc/constants.hpp" + +namespace glm{ +namespace detail +{ + template + struct compute_fmod + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + return detail::functor2::call(std::fmod, a, b); + } + }; + + template + struct compute_fmod + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + return a % b; + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER bool isdenormal(T const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isdenormal' only accept floating-point inputs"); + +# if GLM_HAS_CXX11_STL + return std::fpclassify(x) == FP_SUBNORMAL; +# else + return epsilonNotEqual(x, static_cast(0), epsilon()) && std::fabs(x) < std::numeric_limits::min(); +# endif + } + + template + GLM_FUNC_QUALIFIER typename vec<1, T, Q>::bool_type isdenormal + ( + vec<1, T, Q> const& x + ) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isdenormal' only accept floating-point inputs"); + + return typename vec<1, T, Q>::bool_type( + isdenormal(x.x)); + } + + template + GLM_FUNC_QUALIFIER typename vec<2, T, Q>::bool_type isdenormal + ( + vec<2, T, Q> const& x + ) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isdenormal' only accept floating-point inputs"); + + return typename vec<2, T, Q>::bool_type( + isdenormal(x.x), + isdenormal(x.y)); + } + + template + GLM_FUNC_QUALIFIER typename vec<3, T, Q>::bool_type isdenormal + ( + vec<3, T, Q> const& x + ) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isdenormal' only accept floating-point inputs"); + + return typename vec<3, T, Q>::bool_type( + isdenormal(x.x), + isdenormal(x.y), + isdenormal(x.z)); + } + + template + GLM_FUNC_QUALIFIER typename vec<4, T, Q>::bool_type isdenormal + ( + vec<4, T, Q> const& x + ) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isdenormal' only accept floating-point inputs"); + + return typename vec<4, T, Q>::bool_type( + isdenormal(x.x), + isdenormal(x.y), + isdenormal(x.z), + isdenormal(x.w)); + } + + // fmod + template + GLM_FUNC_QUALIFIER genType fmod(genType x, genType y) + { + return fmod(vec<1, genType>(x), y).x; + } + + template + GLM_FUNC_QUALIFIER vec fmod(vec const& x, T y) + { + return detail::compute_fmod::is_iec559>::call(x, vec(y)); + } + + template + GLM_FUNC_QUALIFIER vec fmod(vec const& x, vec const& y) + { + return detail::compute_fmod::is_iec559>::call(x, y); + } + + template + GLM_FUNC_QUALIFIER vec openBounded(vec const& Value, vec const& Min, vec const& Max) + { + return greaterThan(Value, Min) && lessThan(Value, Max); + } + + template + GLM_FUNC_QUALIFIER vec closeBounded(vec const& Value, vec const& Min, vec const& Max) + { + 
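+		// Inclusive test: component i is true when Min[i] <= Value[i] <= Max[i].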
return greaterThanEqual(Value, Min) && lessThanEqual(Value, Max); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/compatibility.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/compatibility.hpp new file mode 100644 index 000000000000..0af75583b96a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/compatibility.hpp @@ -0,0 +1,133 @@ +/// @ref gtx_compatibility +/// @file glm/gtx/compatibility.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_compatibility GLM_GTX_compatibility +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Provide functions to increase the compatibility with Cg and HLSL languages + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/quaternion.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_compatibility is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_compatibility extension included") +# endif +#endif + +#if GLM_COMPILER & GLM_COMPILER_VC +# include +#elif GLM_COMPILER & GLM_COMPILER_GCC +# include +# if(GLM_PLATFORM & GLM_PLATFORM_ANDROID) +# undef isfinite +# endif +#endif//GLM_COMPILER + +namespace glm +{ + /// @addtogroup gtx_compatibility + /// @{ + + template GLM_FUNC_QUALIFIER T lerp(T x, T y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + + template GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, const vec<2, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, const vec<3, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. 
(From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, const vec<4, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + + template GLM_FUNC_QUALIFIER T saturate(T x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<2, T, Q> saturate(const vec<2, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<3, T, Q> saturate(const vec<3, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<4, T, Q> saturate(const vec<4, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) + + template GLM_FUNC_QUALIFIER T atan2(T y, T x){return atan(y, x);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<2, T, Q> atan2(const vec<2, T, Q>& y, const vec<2, T, Q>& x){return atan(y, x);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<3, T, Q> atan2(const vec<3, T, Q>& y, const vec<3, T, Q>& x){return atan(y, x);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<4, T, Q> atan2(const vec<4, T, Q>& y, const vec<4, T, Q>& x){return atan(y, x);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) + + template GLM_FUNC_DECL bool isfinite(genType const& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) + template GLM_FUNC_DECL vec<1, bool, Q> isfinite(const vec<1, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) + template GLM_FUNC_DECL vec<2, bool, Q> isfinite(const vec<2, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) + template GLM_FUNC_DECL vec<3, bool, Q> isfinite(const vec<3, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) + template GLM_FUNC_DECL vec<4, bool, Q> isfinite(const vec<4, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. 
(From GLM_GTX_compatibility) + + typedef bool bool1; //!< \brief boolean type with 1 component. (From GLM_GTX_compatibility extension) + typedef vec<2, bool, highp> bool2; //!< \brief boolean type with 2 components. (From GLM_GTX_compatibility extension) + typedef vec<3, bool, highp> bool3; //!< \brief boolean type with 3 components. (From GLM_GTX_compatibility extension) + typedef vec<4, bool, highp> bool4; //!< \brief boolean type with 4 components. (From GLM_GTX_compatibility extension) + + typedef bool bool1x1; //!< \brief boolean matrix with 1 x 1 component. (From GLM_GTX_compatibility extension) + typedef mat<2, 2, bool, highp> bool2x2; //!< \brief boolean matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 3, bool, highp> bool2x3; //!< \brief boolean matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 4, bool, highp> bool2x4; //!< \brief boolean matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 2, bool, highp> bool3x2; //!< \brief boolean matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 3, bool, highp> bool3x3; //!< \brief boolean matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 4, bool, highp> bool3x4; //!< \brief boolean matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 2, bool, highp> bool4x2; //!< \brief boolean matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 3, bool, highp> bool4x3; //!< \brief boolean matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 4, bool, highp> bool4x4; //!< \brief boolean matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) + + typedef int int1; //!< \brief integer vector with 1 component. (From GLM_GTX_compatibility extension) + typedef vec<2, int, highp> int2; //!< \brief integer vector with 2 components. (From GLM_GTX_compatibility extension) + typedef vec<3, int, highp> int3; //!< \brief integer vector with 3 components. (From GLM_GTX_compatibility extension) + typedef vec<4, int, highp> int4; //!< \brief integer vector with 4 components. (From GLM_GTX_compatibility extension) + + typedef int int1x1; //!< \brief integer matrix with 1 component. (From GLM_GTX_compatibility extension) + typedef mat<2, 2, int, highp> int2x2; //!< \brief integer matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 3, int, highp> int2x3; //!< \brief integer matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 4, int, highp> int2x4; //!< \brief integer matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 2, int, highp> int3x2; //!< \brief integer matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 3, int, highp> int3x3; //!< \brief integer matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 4, int, highp> int3x4; //!< \brief integer matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 2, int, highp> int4x2; //!< \brief integer matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 3, int, highp> int4x3; //!< \brief integer matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 4, int, highp> int4x4; //!< \brief integer matrix with 4 x 4 components. 
(From GLM_GTX_compatibility extension) + + typedef float float1; //!< \brief single-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension) + typedef vec<2, float, highp> float2; //!< \brief single-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension) + typedef vec<3, float, highp> float3; //!< \brief single-qualifier floating-point vector with 3 components. (From GLM_GTX_compatibility extension) + typedef vec<4, float, highp> float4; //!< \brief single-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension) + + typedef float float1x1; //!< \brief single-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension) + typedef mat<2, 2, float, highp> float2x2; //!< \brief single-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 3, float, highp> float2x3; //!< \brief single-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 4, float, highp> float2x4; //!< \brief single-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 2, float, highp> float3x2; //!< \brief single-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 3, float, highp> float3x3; //!< \brief single-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 4, float, highp> float3x4; //!< \brief single-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 2, float, highp> float4x2; //!< \brief single-qualifier floating-point matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 3, float, highp> float4x3; //!< \brief single-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 4, float, highp> float4x4; //!< \brief single-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) + + typedef double double1; //!< \brief double-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension) + typedef vec<2, double, highp> double2; //!< \brief double-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension) + typedef vec<3, double, highp> double3; //!< \brief double-qualifier floating-point vector with 3 components. (From GLM_GTX_compatibility extension) + typedef vec<4, double, highp> double4; //!< \brief double-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension) + + typedef double double1x1; //!< \brief double-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension) + typedef mat<2, 2, double, highp> double2x2; //!< \brief double-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 3, double, highp> double2x3; //!< \brief double-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 4, double, highp> double2x4; //!< \brief double-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 2, double, highp> double3x2; //!< \brief double-qualifier floating-point matrix with 3 x 2 components. 
+	typedef mat<3, 3, double, highp>	double3x3;	//!< \brief double-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension)
+	typedef mat<3, 4, double, highp>	double3x4;	//!< \brief double-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension)
+	typedef mat<4, 2, double, highp>	double4x2;	//!< \brief double-qualifier floating-point matrix with 4 x 2 components. (From GLM_GTX_compatibility extension)
+	typedef mat<4, 3, double, highp>	double4x3;	//!< \brief double-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension)
+	typedef mat<4, 4, double, highp>	double4x4;	//!< \brief double-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension)
+
+	/// @}
+}//namespace glm
+
+#include "compatibility.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/compatibility.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/compatibility.inl
new file mode 100644
index 000000000000..1d49496b6c6e
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/compatibility.inl
@@ -0,0 +1,62 @@
+#include <limits>
+
+namespace glm
+{
+	// isfinite
+	template<typename genType>
+	GLM_FUNC_QUALIFIER bool isfinite(
+		genType const& x)
+	{
+#		if GLM_HAS_CXX11_STL
+			return std::isfinite(x) != 0;
+#		elif GLM_COMPILER & GLM_COMPILER_VC
+			return _finite(x) != 0;
+#		elif GLM_COMPILER & GLM_COMPILER_GCC && GLM_PLATFORM & GLM_PLATFORM_ANDROID
+			return _isfinite(x) != 0;
+#		else
+			if (std::numeric_limits<genType>::is_integer || std::denorm_absent == std::numeric_limits<genType>::has_denorm)
+				return std::numeric_limits<genType>::min() <= x && std::numeric_limits<genType>::max() >= x;
+			else
+				return -std::numeric_limits<genType>::max() <= x && std::numeric_limits<genType>::max() >= x;
+#		endif
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<1, bool, Q> isfinite(
+		vec<1, T, Q> const& x)
+	{
+		return vec<1, bool, Q>(
+			isfinite(x.x));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<2, bool, Q> isfinite(
+		vec<2, T, Q> const& x)
+	{
+		return vec<2, bool, Q>(
+			isfinite(x.x),
+			isfinite(x.y));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, bool, Q> isfinite(
+		vec<3, T, Q> const& x)
+	{
+		return vec<3, bool, Q>(
+			isfinite(x.x),
+			isfinite(x.y),
+			isfinite(x.z));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, bool, Q> isfinite(
+		vec<4, T, Q> const& x)
+	{
+		return vec<4, bool, Q>(
+			isfinite(x.x),
+			isfinite(x.y),
+			isfinite(x.z),
+			isfinite(x.w));
+	}
+
+}//namespace glm
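The compatibility extension above exists so HLSL/Cg-style code can keep its spellings: `float3`, `int4`, `bool2x2`, and a component-wise `isfinite`. A minimal usage sketch (illustrative only, not part of the patch; it assumes GLM is on the include path and, since GTX extensions are experimental, that `GLM_ENABLE_EXPERIMENTAL` is defined):

```cpp
// Sketch: HLSL-style aliases plus component-wise isfinite.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/gtx/compatibility.hpp>
#include <cstdio>
#include <limits>

int main()
{
	// glm::float3 is vec<3, float, highp>; inject a NaN in the middle lane.
	glm::float3 v(1.0f, std::numeric_limits<float>::quiet_NaN(), 2.0f);
	glm::bool3 ok = glm::isfinite(v); // per-component finiteness test
	std::printf("%d %d %d\n", int(ok.x), int(ok.y), int(ok.z)); // prints: 1 0 1
	return 0;
}
```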
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/component_wise.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/component_wise.hpp
new file mode 100644
index 000000000000..34a2b0a37517
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/component_wise.hpp
@@ -0,0 +1,69 @@
+/// @ref gtx_component_wise
+/// @file glm/gtx/component_wise.hpp
+/// @date 2007-05-21 / 2011-06-07
+/// @author Christophe Riccio
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_component_wise GLM_GTX_component_wise
+/// @ingroup gtx
+///
+/// Include <glm/gtx/component_wise.hpp> to use the features of this extension.
+///
+/// Operations between components of a type
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_component_wise is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_component_wise extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_component_wise
+	/// @{
+
+	/// Convert an integer vector to a normalized float vector.
+	/// If the parameter value type is already a floating qualifier type, the value is passed through.
+	/// @see gtx_component_wise
+	template<typename floatType, length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, floatType, Q> compNormalize(vec<L, T, Q> const& v);
+
+	/// Convert a normalized float vector to an integer vector.
+	/// If the parameter value type is already a floating qualifier type, the value is passed through.
+	/// @see gtx_component_wise
+	template<length_t L, typename T, typename floatType, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> compScale(vec<L, floatType, Q> const& v);
+
+	/// Add all vector components together.
+	/// @see gtx_component_wise
+	template<typename genType>
+	GLM_FUNC_DECL typename genType::value_type compAdd(genType const& v);
+
+	/// Multiply all vector components together.
+	/// @see gtx_component_wise
+	template<typename genType>
+	GLM_FUNC_DECL typename genType::value_type compMul(genType const& v);
+
+	/// Find the minimum value between single vector components.
+	/// @see gtx_component_wise
+	template<typename genType>
+	GLM_FUNC_DECL typename genType::value_type compMin(genType const& v);
+
+	/// Find the maximum value between single vector components.
+	/// @see gtx_component_wise
+	template<typename genType>
+	GLM_FUNC_DECL typename genType::value_type compMax(genType const& v);
+
+	/// @}
+}//namespace glm
+
+#include "component_wise.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/component_wise.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/component_wise.inl
new file mode 100644
index 000000000000..cbbc7d41ec00
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/component_wise.inl
@@ -0,0 +1,127 @@
+/// @ref gtx_component_wise
+
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+	template<length_t L, typename T, typename floatType, qualifier Q, bool isInteger, bool signedType>
+	struct compute_compNormalize
+	{};
+
+	template<length_t L, typename T, typename floatType, qualifier Q>
+	struct compute_compNormalize<L, T, floatType, Q, true, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, floatType, Q> call(vec<L, T, Q> const& v)
+		{
+			floatType const Min = static_cast<floatType>(std::numeric_limits<T>::min());
+			floatType const Max = static_cast<floatType>(std::numeric_limits<T>::max());
+			return (vec<L, floatType, Q>(v) - Min) / (Max - Min) * static_cast<floatType>(2) - static_cast<floatType>(1);
+		}
+	};
+
+	template<length_t L, typename T, typename floatType, qualifier Q>
+	struct compute_compNormalize<L, T, floatType, Q, true, false>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, floatType, Q> call(vec<L, T, Q> const& v)
+		{
+			return vec<L, floatType, Q>(v) / static_cast<floatType>(std::numeric_limits<T>::max());
+		}
+	};
+
+	template<length_t L, typename T, typename floatType, qualifier Q>
+	struct compute_compNormalize<L, T, floatType, Q, false, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, floatType, Q> call(vec<L, T, Q> const& v)
+		{
+			return v;
+		}
+	};
+
+	template<length_t L, typename T, typename floatType, qualifier Q, bool isInteger, bool signedType>
+	struct compute_compScale
+	{};
+
+	template<length_t L, typename T, typename floatType, qualifier Q>
+	struct compute_compScale<L, T, floatType, Q, true, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, floatType, Q> const& v)
+		{
+			floatType const Max = static_cast<floatType>(std::numeric_limits<T>::max()) + static_cast<floatType>(0.5);
+			vec<L, floatType, Q> const Scaled(v * Max);
+			vec<L, T, Q> const Result(Scaled - static_cast<floatType>(0.5));
+			return Result;
+		}
+	};
+
+	template<length_t L, typename T, typename floatType, qualifier Q>
+	struct compute_compScale<L, T, floatType, Q, true, false>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, floatType, Q> const& v)
+		{
+			return vec<L, T, Q>(vec<L, floatType, Q>(v) * static_cast<floatType>(std::numeric_limits<T>::max()));
+		}
+	};
+
+	template<length_t L, typename T, typename floatType, qualifier Q>
+	struct compute_compScale<L, T, floatType, Q, false, true>
+	{
+		GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, floatType, Q> const& v)
+		{
+			return v;
+		}
+	};
+}//namespace detail
+
+	template<typename floatType, length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, floatType, Q> compNormalize(vec<L, T, Q> const& v)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<floatType>::is_iec559, "'compNormalize' accepts only floating-point types for 'floatType' template parameter");
+
+		return detail::compute_compNormalize<L, T, floatType, Q, std::numeric_limits<T>::is_integer, std::numeric_limits<T>::is_signed>::call(v);
+	}
+
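As reconstructed above, `compNormalize<floatType>` maps unsigned integer components onto [0, 1] and signed ones onto [-1, 1], while floating-point input passes through; `compScale` is the inverse mapping back onto the integer range. A small sketch of the unsigned path (illustrative only, not part of the patch):

```cpp
// Sketch: normalizing an 8-bit RGBA pixel; each unsigned component is
// divided by its type's max (255 for unsigned char).
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/component_wise.hpp>
#include <cstdio>

int main()
{
	glm::vec<4, unsigned char, glm::defaultp> rgba8(255, 128, 0, 255);
	glm::vec4 rgbaf = glm::compNormalize<float>(rgba8);
	std::printf("%f %f %f %f\n", rgbaf.x, rgbaf.y, rgbaf.z, rgbaf.w); // ~1.0 0.502 0.0 1.0
	return 0;
}
```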
+	template<length_t L, typename T, typename floatType, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> compScale(vec<L, floatType, Q> const& v)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<floatType>::is_iec559, "'compScale' accepts only floating-point types for 'floatType' template parameter");
+
+		return detail::compute_compScale<L, T, floatType, Q, std::numeric_limits<T>::is_integer, std::numeric_limits<T>::is_signed>::call(v);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER T compAdd(vec<L, T, Q> const& v)
+	{
+		T Result(0);
+		for(length_t i = 0, n = v.length(); i < n; ++i)
+			Result += v[i];
+		return Result;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER T compMul(vec<L, T, Q> const& v)
+	{
+		T Result(1);
+		for(length_t i = 0, n = v.length(); i < n; ++i)
+			Result *= v[i];
+		return Result;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER T compMin(vec<L, T, Q> const& v)
+	{
+		T Result(v[0]);
+		for(length_t i = 1, n = v.length(); i < n; ++i)
+			Result = min(Result, v[i]);
+		return Result;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER T compMax(vec<L, T, Q> const& v)
+	{
+		T Result(v[0]);
+		for(length_t i = 1, n = v.length(); i < n; ++i)
+			Result = max(Result, v[i]);
+		return Result;
+	}
+}//namespace glm
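The four reductions close out the file: `compAdd` folds with `+` from zero, `compMul` with `*` from one, and `compMin`/`compMax` seed with the first component. For instance (illustrative only, not part of the patch):

```cpp
// Sketch: folding one vector down to a scalar per reduction.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/component_wise.hpp>
#include <cassert>

int main()
{
	glm::vec3 v(2.0f, -1.0f, 4.0f);
	assert(glm::compAdd(v) ==  5.0f); // 2 + (-1) + 4
	assert(glm::compMul(v) == -8.0f); // 2 * (-1) * 4
	assert(glm::compMin(v) == -1.0f);
	assert(glm::compMax(v) ==  4.0f);
	return 0;
}
```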
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/dual_quaternion.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/dual_quaternion.hpp
new file mode 100644
index 000000000000..a6f57613db1b
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/dual_quaternion.hpp
@@ -0,0 +1,274 @@
+/// @ref gtx_dual_quaternion
+/// @file glm/gtx/dual_quaternion.hpp
+/// @author Maksim Vorobiev (msomeone@gmail.com)
+///
+/// @see core (dependence)
+/// @see gtc_constants (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtx_dual_quaternion GLM_GTX_dual_quaternion
+/// @ingroup gtx
+///
+/// Include <glm/gtx/dual_quaternion.hpp> to use the features of this extension.
+///
+/// Defines a templated dual-quaternion type and several dual-quaternion operations.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/constants.hpp"
+#include "../gtc/quaternion.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_dual_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_dual_quaternion extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_dual_quaternion
+	/// @{
+
+	template<typename T, qualifier Q>
+	struct tdualquat
+	{
+		// -- Implementation detail --
+
+		typedef T value_type;
+		typedef qua<T, Q> part_type;
+
+		// -- Data --
+
+		qua<T, Q> real, dual;
+
+		// -- Component accesses --
+
+		typedef length_t length_type;
+		/// Return the count of components of a dual quaternion
+		GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;}
+
+		GLM_FUNC_DECL part_type & operator[](length_type i);
+		GLM_FUNC_DECL part_type const& operator[](length_type i) const;
+
+		// -- Implicit basic constructors --
+
+		GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR tdualquat() GLM_DEFAULT;
+		GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat<T, Q> const& d) GLM_DEFAULT;
+		template<qualifier P>
+		GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat<T, P> const& d);
+
+		// -- Explicit basic constructors --
+
+		GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua<T, Q> const& real);
+		GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua<T, Q> const& orientation, vec<3, T, Q> const& translation);
+		GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua<T, Q> const& real, qua<T, Q> const& dual);
+
+		// -- Conversion constructors --
+
+		template<typename U, qualifier P>
+		GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT tdualquat(tdualquat<U, P> const& q);
+
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR tdualquat(mat<2, 4, T, Q> const& holder_mat);
+		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR tdualquat(mat<3, 4, T, Q> const& aug_mat);
+
+		// -- Unary arithmetic operators --
+
+		GLM_DEFAULTED_FUNC_DECL tdualquat<T, Q> & operator=(tdualquat<T, Q> const& m) GLM_DEFAULT;
+
+		template<typename U>
+		GLM_FUNC_DECL tdualquat<T, Q> & operator=(tdualquat<U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DECL tdualquat<T, Q> & operator*=(U s);
+		template<typename U>
+		GLM_FUNC_DECL tdualquat<T, Q> & operator/=(U s);
+	};
+
+	// -- Unary bit operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> operator+(tdualquat<T, Q> const& q);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> operator-(tdualquat<T, Q> const& q);
+
+	// -- Binary operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> operator+(tdualquat<T, Q> const& q, tdualquat<T, Q> const& p);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> operator*(tdualquat<T, Q> const& q, tdualquat<T, Q> const& p);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> operator*(tdualquat<T, Q> const& q, vec<3, T, Q> const& v);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat<T, Q> const& q);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<4, T, Q> operator*(tdualquat<T, Q> const& q, vec<4, T, Q> const& v);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat<T, Q> const& q);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> operator*(tdualquat<T, Q> const& q, T const& s);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> operator*(T const& s, tdualquat<T, Q> const& q);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> operator/(tdualquat<T, Q> const& q, T const& s);
+
+	// -- Boolean operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL bool operator==(tdualquat<T, Q> const& q1, tdualquat<T, Q> const& q2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL bool operator!=(tdualquat<T, Q> const& q1, tdualquat<T, Q> const& q2);
+
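The operators above give `tdualquat` its rigid-body semantics: the real part carries the rotation, the dual part encodes the translation (dual = 0.5 * t * r, with t as a pure quaternion), `operator*` on two dual quaternions composes transforms, and `q * v` applies one to a point (rotate, then translate). A sketch using the `glm::dualquat` typedef and the `normalize` helper declared just below (illustrative only, not part of the patch):

```cpp
// Sketch: a dual quaternion as a rigid transform.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtx/dual_quaternion.hpp>

int main()
{
	// Rotate 90 degrees about Z, then translate +1 along X.
	glm::quat r = glm::angleAxis(glm::radians(90.0f), glm::vec3(0, 0, 1));
	glm::dualquat dq(r, glm::vec3(1.0f, 0.0f, 0.0f));

	// (1,0,0) rotates to (0,1,0), then translates to ~(1,1,0).
	glm::vec3 p = dq * glm::vec3(1.0f, 0.0f, 0.0f);

	// Composition works like quaternions: (a * b) * v == a * (b * v);
	// renormalize after composing to keep the real part unit length.
	glm::dualquat twice = glm::normalize(dq * dq);
	(void)p; (void)twice;
	return 0;
}
```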
+	/// Creates an identity dual quaternion.
+	///
+	/// @see gtx_dual_quaternion
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> dual_quat_identity();
+
+	/// Returns the normalized quaternion.
+	///
+	/// @see gtx_dual_quaternion
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> normalize(tdualquat<T, Q> const& q);
+
+	/// Returns the linear interpolation of two dual quaternions.
+	///
+	/// @see gtx_dual_quaternion
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> lerp(tdualquat<T, Q> const& x, tdualquat<T, Q> const& y, T const& a);
+
+	/// Returns the inverse of q.
+	///
+	/// @see gtx_dual_quaternion
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> inverse(tdualquat<T, Q> const& q);
+
+	/// Converts a quaternion to a 2 * 4 matrix.
+	///
+	/// @see gtx_dual_quaternion
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<2, 4, T, Q> mat2x4_cast(tdualquat<T, Q> const& x);
+
+	/// Converts a quaternion to a 3 * 4 matrix.
+	///
+	/// @see gtx_dual_quaternion
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<3, 4, T, Q> mat3x4_cast(tdualquat<T, Q> const& x);
+
+	/// Converts a 2 * 4 matrix (matrix which holds real and dual parts) to a quaternion.
+	///
+	/// @see gtx_dual_quaternion
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> dualquat_cast(mat<2, 4, T, Q> const& x);
+
+	/// Converts a 3 * 4 matrix (augmented matrix rotation + translation) to a quaternion.
+	///
+	/// @see gtx_dual_quaternion
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL tdualquat<T, Q> dualquat_cast(mat<3, 4, T, Q> const& x);
+
+
+	/// Dual-quaternion of low single-qualifier floating-point numbers.
+	///
+	/// @see gtx_dual_quaternion
+	typedef tdualquat<float, lowp>		lowp_dualquat;
+
+	/// Dual-quaternion of medium single-qualifier floating-point numbers.
+	///
+	/// @see gtx_dual_quaternion
+	typedef tdualquat<float, mediump>	mediump_dualquat;
+
+	/// Dual-quaternion of high single-qualifier floating-point numbers.
+	///
+	/// @see gtx_dual_quaternion
+	typedef tdualquat<float, highp>		highp_dualquat;
+
+
+	/// Dual-quaternion of low single-qualifier floating-point numbers.
+	///
+	/// @see gtx_dual_quaternion
+	typedef tdualquat<float, lowp>		lowp_fdualquat;
+
+	/// Dual-quaternion of medium single-qualifier floating-point numbers.
+	///
+	/// @see gtx_dual_quaternion
+	typedef tdualquat<float, mediump>	mediump_fdualquat;
+
+	/// Dual-quaternion of high single-qualifier floating-point numbers.
+	///
+	/// @see gtx_dual_quaternion
+	typedef tdualquat<float, highp>		highp_fdualquat;
+
+
+	/// Dual-quaternion of low double-qualifier floating-point numbers.
+	///
+	/// @see gtx_dual_quaternion
+	typedef tdualquat<double, lowp>		lowp_ddualquat;
+
+	/// Dual-quaternion of medium double-qualifier floating-point numbers.
+	///
+	/// @see gtx_dual_quaternion
+	typedef tdualquat<double, mediump>	mediump_ddualquat;
+
+	/// Dual-quaternion of high double-qualifier floating-point numbers.
+	///
+	/// @see gtx_dual_quaternion
+	typedef tdualquat<double, highp>	highp_ddualquat;
+
+
+#if(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT))
+	/// Dual-quaternion of floating-point numbers.
+	///
+	/// @see gtx_dual_quaternion
+	typedef highp_fdualquat dualquat;
+
+	/// Dual-quaternion of single-qualifier floating-point numbers.
+ /// + /// @see gtx_dual_quaternion + typedef highp_fdualquat fdualquat; +#elif(defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) + typedef highp_fdualquat dualquat; + typedef highp_fdualquat fdualquat; +#elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) + typedef mediump_fdualquat dualquat; + typedef mediump_fdualquat fdualquat; +#elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && defined(GLM_PRECISION_LOWP_FLOAT)) + typedef lowp_fdualquat dualquat; + typedef lowp_fdualquat fdualquat; +#else +# error "GLM error: multiple default precision requested for single-precision floating-point types" +#endif + + +#if(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) + /// Dual-quaternion of default double-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef highp_ddualquat ddualquat; +#elif(defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) + typedef highp_ddualquat ddualquat; +#elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) + typedef mediump_ddualquat ddualquat; +#elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && defined(GLM_PRECISION_LOWP_DOUBLE)) + typedef lowp_ddualquat ddualquat; +#else +# error "GLM error: Multiple default precision requested for double-precision floating-point types" +#endif + + /// @} +} //namespace glm + +#include "dual_quaternion.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/dual_quaternion.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/dual_quaternion.inl new file mode 100644 index 000000000000..3a04160e3a16 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/dual_quaternion.inl @@ -0,0 +1,352 @@ +/// @ref gtx_dual_quaternion + +#include "../geometric.hpp" +#include + +namespace glm +{ + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER typename tdualquat::part_type & tdualquat::operator[](typename tdualquat::length_type i) + { + assert(i >= 0 && i < this->length()); + return (&real)[i]; + } + + template + GLM_FUNC_QUALIFIER typename tdualquat::part_type const& tdualquat::operator[](typename tdualquat::length_type i) const + { + assert(i >= 0 && i < this->length()); + return (&real)[i]; + } + + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat() +# if GLM_CONFIG_DEFAULTED_FUNCTIONS != GLM_DISABLE + : real(qua()) + , dual(qua::wxyz(0, 0, 0, 0)) +# endif + {} + + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& d) + : real(d.real) + , dual(d.dual) + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& d) + : real(d.real) + , dual(d.dual) + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& r) + : real(r), dual(qua::wxyz(0, 0, 0, 0)) + {} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& q, vec<3, T, Q> const& p) + : real(q), dual(qua::wxyz( + T(-0.5) * ( p.x*q.x + p.y*q.y + p.z*q.z), + T(+0.5) * ( p.x*q.w + p.y*q.z - p.z*q.y), + T(+0.5) * (-p.x*q.z + p.y*q.w + p.z*q.x), + T(+0.5) * 
( p.x*q.y - p.y*q.x + p.z*q.w))) + {} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& r, qua const& d) + : real(r), dual(d) + {} + + // -- Conversion constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& q) + : real(q.real) + , dual(q.dual) + {} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(mat<2, 4, T, Q> const& m) + { + *this = dualquat_cast(m); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(mat<3, 4, T, Q> const& m) + { + *this = dualquat_cast(m); + } + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER tdualquat & tdualquat::operator=(tdualquat const& q) + { + this->real = q.real; + this->dual = q.dual; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator=(tdualquat const& q) + { + this->real = q.real; + this->dual = q.dual; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator*=(U s) + { + this->real *= static_cast(s); + this->dual *= static_cast(s); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator/=(U s) + { + this->real /= static_cast(s); + this->dual /= static_cast(s); + return *this; + } + + // -- Unary bit operators -- + + template + GLM_FUNC_QUALIFIER tdualquat operator+(tdualquat const& q) + { + return q; + } + + template + GLM_FUNC_QUALIFIER tdualquat operator-(tdualquat const& q) + { + return tdualquat(-q.real, -q.dual); + } + + // -- Binary operators -- + + template + GLM_FUNC_QUALIFIER tdualquat operator+(tdualquat const& q, tdualquat const& p) + { + return tdualquat(q.real + p.real,q.dual + p.dual); + } + + template + GLM_FUNC_QUALIFIER tdualquat operator*(tdualquat const& p, tdualquat const& o) + { + return tdualquat(p.real * o.real,p.real * o.dual + p.dual * o.real); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> operator*(tdualquat const& q, vec<3, T, Q> const& v) + { + vec<3, T, Q> const real_v3(q.real.x,q.real.y,q.real.z); + vec<3, T, Q> const dual_v3(q.dual.x,q.dual.y,q.dual.z); + return (cross(real_v3, cross(real_v3,v) + v * q.real.w + dual_v3) + dual_v3 * q.real.w - real_v3 * q.dual.w) * T(2) + v; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat const& q) + { + return glm::inverse(q) * v; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> operator*(tdualquat const& q, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(q * vec<3, T, Q>(v), v.w); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat const& q) + { + return glm::inverse(q) * v; + } + + template + GLM_FUNC_QUALIFIER tdualquat operator*(tdualquat const& q, T const& s) + { + return tdualquat(q.real * s, q.dual * s); + } + + template + GLM_FUNC_QUALIFIER tdualquat operator*(T const& s, tdualquat const& q) + { + return q * s; + } + + template + GLM_FUNC_QUALIFIER tdualquat operator/(tdualquat const& q, T const& s) + { + return tdualquat(q.real / s, q.dual / s); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER bool operator==(tdualquat const& q1, tdualquat const& q2) + { + return (q1.real == q2.real) && (q1.dual == q2.dual); + } + + template + GLM_FUNC_QUALIFIER bool operator!=(tdualquat const& q1, tdualquat const& q2) + { + return (q1.real != q2.real) || (q1.dual != q2.dual); + } + + // -- Operations -- + + template + GLM_FUNC_QUALIFIER 
tdualquat dual_quat_identity() + { + return tdualquat( + qua::wxyz(static_cast(1), static_cast(0), static_cast(0), static_cast(0)), + qua::wxyz(static_cast(0), static_cast(0), static_cast(0), static_cast(0))); + } + + template + GLM_FUNC_QUALIFIER tdualquat normalize(tdualquat const& q) + { + return q / length(q.real); + } + + template + GLM_FUNC_QUALIFIER tdualquat lerp(tdualquat const& x, tdualquat const& y, T const& a) + { + // Dual Quaternion Linear blend aka DLB: + // Lerp is only defined in [0, 1] + assert(a >= static_cast(0)); + assert(a <= static_cast(1)); + T const k = dot(x.real,y.real) < static_cast(0) ? -a : a; + T const one(1); + return tdualquat(x * (one - a) + y * k); + } + + template + GLM_FUNC_QUALIFIER tdualquat inverse(tdualquat const& q) + { + const glm::qua real = conjugate(q.real); + const glm::qua dual = conjugate(q.dual); + return tdualquat(real, dual + (real * (-2.0f * dot(real,dual)))); + } + + template + GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat2x4_cast(tdualquat const& x) + { + return mat<2, 4, T, Q>( x[0].x, x[0].y, x[0].z, x[0].w, x[1].x, x[1].y, x[1].z, x[1].w ); + } + + template + GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat3x4_cast(tdualquat const& x) + { + qua r = x.real / length2(x.real); + + qua const rr(r.w * x.real.w, r.x * x.real.x, r.y * x.real.y, r.z * x.real.z); + r *= static_cast(2); + + T const xy = r.x * x.real.y; + T const xz = r.x * x.real.z; + T const yz = r.y * x.real.z; + T const wx = r.w * x.real.x; + T const wy = r.w * x.real.y; + T const wz = r.w * x.real.z; + + vec<4, T, Q> const a( + rr.w + rr.x - rr.y - rr.z, + xy - wz, + xz + wy, + -(x.dual.w * r.x - x.dual.x * r.w + x.dual.y * r.z - x.dual.z * r.y)); + + vec<4, T, Q> const b( + xy + wz, + rr.w + rr.y - rr.x - rr.z, + yz - wx, + -(x.dual.w * r.y - x.dual.x * r.z - x.dual.y * r.w + x.dual.z * r.x)); + + vec<4, T, Q> const c( + xz - wy, + yz + wx, + rr.w + rr.z - rr.x - rr.y, + -(x.dual.w * r.z + x.dual.x * r.y - x.dual.y * r.x - x.dual.z * r.w)); + + return mat<3, 4, T, Q>(a, b, c); + } + + template + GLM_FUNC_QUALIFIER tdualquat dualquat_cast(mat<2, 4, T, Q> const& x) + { + return tdualquat( + qua::wxyz( x[0].w, x[0].x, x[0].y, x[0].z ), + qua::wxyz( x[1].w, x[1].x, x[1].y, x[1].z )); + } + + template + GLM_FUNC_QUALIFIER tdualquat dualquat_cast(mat<3, 4, T, Q> const& x) + { + qua real; + + T const trace = x[0].x + x[1].y + x[2].z; + if(trace > static_cast(0)) + { + T const r = sqrt(T(1) + trace); + T const invr = static_cast(0.5) / r; + real.w = static_cast(0.5) * r; + real.x = (x[2].y - x[1].z) * invr; + real.y = (x[0].z - x[2].x) * invr; + real.z = (x[1].x - x[0].y) * invr; + } + else if(x[0].x > x[1].y && x[0].x > x[2].z) + { + T const r = sqrt(T(1) + x[0].x - x[1].y - x[2].z); + T const invr = static_cast(0.5) / r; + real.x = static_cast(0.5)*r; + real.y = (x[1].x + x[0].y) * invr; + real.z = (x[0].z + x[2].x) * invr; + real.w = (x[2].y - x[1].z) * invr; + } + else if(x[1].y > x[2].z) + { + T const r = sqrt(T(1) + x[1].y - x[0].x - x[2].z); + T const invr = static_cast(0.5) / r; + real.x = (x[1].x + x[0].y) * invr; + real.y = static_cast(0.5) * r; + real.z = (x[2].y + x[1].z) * invr; + real.w = (x[0].z - x[2].x) * invr; + } + else + { + T const r = sqrt(T(1) + x[2].z - x[0].x - x[1].y); + T const invr = static_cast(0.5) / r; + real.x = (x[0].z + x[2].x) * invr; + real.y = (x[2].y + x[1].z) * invr; + real.z = static_cast(0.5) * r; + real.w = (x[1].x - x[0].y) * invr; + } + + qua dual; + dual.x = static_cast(0.5) * ( x[0].w * real.w + x[1].w * real.z - x[2].w * real.y); + dual.y = 
static_cast(0.5) * (-x[0].w * real.z + x[1].w * real.w + x[2].w * real.x); + dual.z = static_cast(0.5) * ( x[0].w * real.y - x[1].w * real.x + x[2].w * real.w); + dual.w = -static_cast(0.5) * ( x[0].w * real.x + x[1].w * real.y + x[2].w * real.z); + return tdualquat(real, dual); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/easing.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/easing.hpp new file mode 100644 index 000000000000..da89826a90eb --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/easing.hpp @@ -0,0 +1,219 @@ +/// @ref gtx_easing +/// @file glm/gtx/easing.hpp +/// @author Robert Chisholm +/// +/// @see core (dependence) +/// +/// @defgroup gtx_easing GLM_GTX_easing +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Easing functions for animations and transitions +/// All functions take a parameter x in the range [0.0,1.0] +/// +/// Based on the AHEasing project of Warren Moore (https://github.com/warrenm/AHEasing) + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/constants.hpp" +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_easing is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_easing extension included") +# endif +#endif + +namespace glm{ + /// @addtogroup gtx_easing + /// @{ + + /// Modelled after the line y = x + /// @see gtx_easing + template + GLM_FUNC_DECL genType linearInterpolation(genType const & a); + + /// Modelled after the parabola y = x^2 + /// @see gtx_easing + template + GLM_FUNC_DECL genType quadraticEaseIn(genType const & a); + + /// Modelled after the parabola y = -x^2 + 2x + /// @see gtx_easing + template + GLM_FUNC_DECL genType quadraticEaseOut(genType const & a); + + /// Modelled after the piecewise quadratic + /// y = (1/2)((2x)^2) ; [0, 0.5) + /// y = -(1/2)((2x-1)*(2x-3) - 1) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType quadraticEaseInOut(genType const & a); + + /// Modelled after the cubic y = x^3 + template + GLM_FUNC_DECL genType cubicEaseIn(genType const & a); + + /// Modelled after the cubic y = (x - 1)^3 + 1 + /// @see gtx_easing + template + GLM_FUNC_DECL genType cubicEaseOut(genType const & a); + + /// Modelled after the piecewise cubic + /// y = (1/2)((2x)^3) ; [0, 0.5) + /// y = (1/2)((2x-2)^3 + 2) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType cubicEaseInOut(genType const & a); + + /// Modelled after the quartic x^4 + /// @see gtx_easing + template + GLM_FUNC_DECL genType quarticEaseIn(genType const & a); + + /// Modelled after the quartic y = 1 - (x - 1)^4 + /// @see gtx_easing + template + GLM_FUNC_DECL genType quarticEaseOut(genType const & a); + + /// Modelled after the piecewise quartic + /// y = (1/2)((2x)^4) ; [0, 0.5) + /// y = -(1/2)((2x-2)^4 - 2) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType quarticEaseInOut(genType const & a); + + /// Modelled after the quintic y = x^5 + /// @see gtx_easing + template + GLM_FUNC_DECL genType quinticEaseIn(genType const & a); + + /// Modelled after the quintic y = (x - 1)^5 + 1 + /// @see gtx_easing + template + GLM_FUNC_DECL genType quinticEaseOut(genType const & a); + + /// Modelled after the piecewise quintic + /// y = (1/2)((2x)^5) ; [0, 0.5) + /// y = (1/2)((2x-2)^5 + 2) ; 
[0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType quinticEaseInOut(genType const & a); + + /// Modelled after quarter-cycle of sine wave + /// @see gtx_easing + template + GLM_FUNC_DECL genType sineEaseIn(genType const & a); + + /// Modelled after quarter-cycle of sine wave (different phase) + /// @see gtx_easing + template + GLM_FUNC_DECL genType sineEaseOut(genType const & a); + + /// Modelled after half sine wave + /// @see gtx_easing + template + GLM_FUNC_DECL genType sineEaseInOut(genType const & a); + + /// Modelled after shifted quadrant IV of unit circle + /// @see gtx_easing + template + GLM_FUNC_DECL genType circularEaseIn(genType const & a); + + /// Modelled after shifted quadrant II of unit circle + /// @see gtx_easing + template + GLM_FUNC_DECL genType circularEaseOut(genType const & a); + + /// Modelled after the piecewise circular function + /// y = (1/2)(1 - sqrt(1 - 4x^2)) ; [0, 0.5) + /// y = (1/2)(sqrt(-(2x - 3)*(2x - 1)) + 1) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType circularEaseInOut(genType const & a); + + /// Modelled after the exponential function y = 2^(10(x - 1)) + /// @see gtx_easing + template + GLM_FUNC_DECL genType exponentialEaseIn(genType const & a); + + /// Modelled after the exponential function y = -2^(-10x) + 1 + /// @see gtx_easing + template + GLM_FUNC_DECL genType exponentialEaseOut(genType const & a); + + /// Modelled after the piecewise exponential + /// y = (1/2)2^(10(2x - 1)) ; [0,0.5) + /// y = -(1/2)*2^(-10(2x - 1))) + 1 ; [0.5,1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType exponentialEaseInOut(genType const & a); + + /// Modelled after the damped sine wave y = sin(13pi/2*x)*pow(2, 10 * (x - 1)) + /// @see gtx_easing + template + GLM_FUNC_DECL genType elasticEaseIn(genType const & a); + + /// Modelled after the damped sine wave y = sin(-13pi/2*(x + 1))*pow(2, -10x) + 1 + /// @see gtx_easing + template + GLM_FUNC_DECL genType elasticEaseOut(genType const & a); + + /// Modelled after the piecewise exponentially-damped sine wave: + /// y = (1/2)*sin(13pi/2*(2*x))*pow(2, 10 * ((2*x) - 1)) ; [0,0.5) + /// y = (1/2)*(sin(-13pi/2*((2x-1)+1))*pow(2,-10(2*x-1)) + 2) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType elasticEaseInOut(genType const & a); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseIn(genType const& a); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseOut(genType const& a); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseInOut(genType const& a); + + /// @param a parameter + /// @param o Optional overshoot modifier + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseIn(genType const& a, genType const& o); + + /// @param a parameter + /// @param o Optional overshoot modifier + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseOut(genType const& a, genType const& o); + + /// @param a parameter + /// @param o Optional overshoot modifier + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseInOut(genType const& a, genType const& o); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType bounceEaseIn(genType const& a); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType bounceEaseOut(genType const& a); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType bounceEaseInOut(genType const& a); + + /// @} +}//namespace glm + +#include "easing.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/easing.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/easing.inl 
new file mode 100644 index 000000000000..b599c30664af --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/easing.inl @@ -0,0 +1,436 @@ +/// @ref gtx_easing + +#include + +namespace glm{ + + template + GLM_FUNC_QUALIFIER genType linearInterpolation(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return a; + } + + template + GLM_FUNC_QUALIFIER genType quadraticEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return a * a; + } + + template + GLM_FUNC_QUALIFIER genType quadraticEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return -(a * (a - static_cast(2))); + } + + template + GLM_FUNC_QUALIFIER genType quadraticEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + { + return static_cast(2) * a * a; + } + else + { + return (-static_cast(2) * a * a) + (4 * a) - one(); + } + } + + template + GLM_FUNC_QUALIFIER genType cubicEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return a * a * a; + } + + template + GLM_FUNC_QUALIFIER genType cubicEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType const f = a - one(); + return f * f * f + one(); + } + + template + GLM_FUNC_QUALIFIER genType cubicEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if (a < static_cast(0.5)) + { + return static_cast(4) * a * a * a; + } + else + { + genType const f = ((static_cast(2) * a) - static_cast(2)); + return static_cast(0.5) * f * f * f + one(); + } + } + + template + GLM_FUNC_QUALIFIER genType quarticEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return a * a * a * a; + } + + template + GLM_FUNC_QUALIFIER genType quarticEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType const f = (a - one()); + return f * f * f * (one() - a) + one(); + } + + template + GLM_FUNC_QUALIFIER genType quarticEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + { + return static_cast(8) * a * a * a * a; + } + else + { + genType const f = (a - one()); + return -static_cast(8) * f * f * f * f + one(); + } + } + + template + GLM_FUNC_QUALIFIER genType quinticEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return a * a * a * a * a; + } + + template + GLM_FUNC_QUALIFIER genType quinticEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType const f = (a - one()); + return f * f * f * f * f + one(); + } + + template + GLM_FUNC_QUALIFIER genType quinticEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + { + return static_cast(16) * a * a * a * a * a; + } + else + { + genType const f = ((static_cast(2) * a) - static_cast(2)); + return static_cast(0.5) * f * f * f * f * f + one(); + } + } + + template + GLM_FUNC_QUALIFIER genType sineEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return sin((a - one()) * half_pi()) + one(); + } + + template + GLM_FUNC_QUALIFIER 
genType sineEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return sin(a * half_pi()); + } + + template + GLM_FUNC_QUALIFIER genType sineEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return static_cast(0.5) * (one() - cos(a * pi())); + } + + template + GLM_FUNC_QUALIFIER genType circularEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return one() - sqrt(one() - (a * a)); + } + + template + GLM_FUNC_QUALIFIER genType circularEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return sqrt((static_cast(2) - a) * a); + } + + template + GLM_FUNC_QUALIFIER genType circularEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + { + return static_cast(0.5) * (one() - std::sqrt(one() - static_cast(4) * (a * a))); + } + else + { + return static_cast(0.5) * (std::sqrt(-((static_cast(2) * a) - static_cast(3)) * ((static_cast(2) * a) - one())) + one()); + } + } + + template + GLM_FUNC_QUALIFIER genType exponentialEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a <= zero()) + return a; + else + { + genType const Complementary = a - one(); + genType const Two = static_cast(2); + + return glm::pow(Two, Complementary * static_cast(10)); + } + } + + template + GLM_FUNC_QUALIFIER genType exponentialEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a >= one()) + return a; + else + { + return one() - glm::pow(static_cast(2), -static_cast(10) * a); + } + } + + template + GLM_FUNC_QUALIFIER genType exponentialEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + return static_cast(0.5) * glm::pow(static_cast(2), (static_cast(20) * a) - static_cast(10)); + else + return -static_cast(0.5) * glm::pow(static_cast(2), (-static_cast(20) * a) + static_cast(10)) + one(); + } + + template + GLM_FUNC_QUALIFIER genType elasticEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return std::sin(static_cast(13) * half_pi() * a) * glm::pow(static_cast(2), static_cast(10) * (a - one())); + } + + template + GLM_FUNC_QUALIFIER genType elasticEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return std::sin(-static_cast(13) * half_pi() * (a + one())) * glm::pow(static_cast(2), -static_cast(10) * a) + one(); + } + + template + GLM_FUNC_QUALIFIER genType elasticEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + return static_cast(0.5) * std::sin(static_cast(13) * half_pi() * (static_cast(2) * a)) * glm::pow(static_cast(2), static_cast(10) * ((static_cast(2) * a) - one())); + else + return static_cast(0.5) * (std::sin(-static_cast(13) * half_pi() * ((static_cast(2) * a - one()) + one())) * glm::pow(static_cast(2), -static_cast(10) * (static_cast(2) * a - one())) + static_cast(2)); + } + + template + GLM_FUNC_QUALIFIER genType backEaseIn(genType const& a, genType const& o) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType z = ((o + one()) * a) - o; + return (a * a * z); + } + + template + 
GLM_FUNC_QUALIFIER genType backEaseOut(genType const& a, genType const& o) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType n = a - one(); + genType z = ((o + one()) * n) + o; + return (n * n * z) + one(); + } + + template + GLM_FUNC_QUALIFIER genType backEaseInOut(genType const& a, genType const& o) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType s = o * static_cast(1.525); + genType x = static_cast(0.5); + genType n = a / static_cast(0.5); + + if (n < static_cast(1)) + { + genType z = ((s + static_cast(1)) * n) - s; + genType m = n * n * z; + return x * m; + } + else + { + n -= static_cast(2); + genType z = ((s + static_cast(1)) * n) + s; + genType m = (n*n*z) + static_cast(2); + return x * m; + } + } + + template + GLM_FUNC_QUALIFIER genType backEaseIn(genType const& a) + { + return backEaseIn(a, static_cast(1.70158)); + } + + template + GLM_FUNC_QUALIFIER genType backEaseOut(genType const& a) + { + return backEaseOut(a, static_cast(1.70158)); + } + + template + GLM_FUNC_QUALIFIER genType backEaseInOut(genType const& a) + { + return backEaseInOut(a, static_cast(1.70158)); + } + + template + GLM_FUNC_QUALIFIER genType bounceEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(4.0 / 11.0)) + { + return (static_cast(121) * a * a) / static_cast(16); + } + else if(a < static_cast(8.0 / 11.0)) + { + return (static_cast(363.0 / 40.0) * a * a) - (static_cast(99.0 / 10.0) * a) + static_cast(17.0 / 5.0); + } + else if(a < static_cast(9.0 / 10.0)) + { + return (static_cast(4356.0 / 361.0) * a * a) - (static_cast(35442.0 / 1805.0) * a) + static_cast(16061.0 / 1805.0); + } + else + { + return (static_cast(54.0 / 5.0) * a * a) - (static_cast(513.0 / 25.0) * a) + static_cast(268.0 / 25.0); + } + } + + template + GLM_FUNC_QUALIFIER genType bounceEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return one() - bounceEaseOut(one() - a); + } + + template + GLM_FUNC_QUALIFIER genType bounceEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + { + return static_cast(0.5) * (one() - bounceEaseOut(one() - a * static_cast(2))); + } + else + { + return static_cast(0.5) * bounceEaseOut(a * static_cast(2) - one()) + static_cast(0.5); + } + } + +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/euler_angles.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/euler_angles.hpp new file mode 100644 index 000000000000..27236973af60 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/euler_angles.hpp @@ -0,0 +1,335 @@ +/// @ref gtx_euler_angles +/// @file glm/gtx/euler_angles.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_euler_angles GLM_GTX_euler_angles +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Build matrices from Euler angles. +/// +/// Extraction of Euler angles from rotation matrix. +/// Based on the original paper 2014 Mike Day - Extracting Euler Angles from a Rotation Matrix. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_euler_angles is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_euler_angles extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_euler_angles + /// @{ + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle X. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleX( + T const& angleX); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Y. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleY( + T const& angleY); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Z. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZ( + T const& angleZ); + + /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about X-axis. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleX( + T const & angleX, T const & angularVelocityX); + + /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Y-axis. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleY( + T const & angleY, T const & angularVelocityY); + + /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Z-axis. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleZ( + T const & angleZ, T const & angularVelocityZ); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXY( + T const& angleX, + T const& angleY); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYX( + T const& angleY, + T const& angleX); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZ( + T const& angleX, + T const& angleZ); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZX( + T const& angle, + T const& angleX); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZ( + T const& angleY, + T const& angleZ); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZY( + T const& angleZ, + T const& angleY); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYZ( + T const& t1, + T const& t2, + T const& t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXZ( + T const& yaw, + T const& pitch, + T const& roll); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * X). 
+ /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZX( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYX( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXY( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZY( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYZ( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXZ( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZY( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZX( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYX( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXY( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> yawPitchRoll( + T const& yaw, + T const& pitch, + T const& roll); + + /// Creates a 2D 2 * 2 rotation matrix from an euler angle. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<2, 2, T, defaultp> orientate2(T const& angle); + + /// Creates a 2D 4 * 4 homogeneous rotation matrix from an euler angle. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<3, 3, T, defaultp> orientate3(T const& angle); + + /// Creates a 3D 3 * 3 rotation matrix from euler angles (Y * X * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<3, 3, T, Q> orientate3(vec<3, T, Q> const& angles); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). 
+ /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, Q> orientate4(vec<3, T, Q> const& angles); + + /// Extracts the (X * Y * Z) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Y * X * Z) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (X * Z * X) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (X * Y * X) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Y * X * Y) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Y * Z * Y) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Z * Y * Z) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Z * X * Z) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (X * Z * Y) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Y * Z * X) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Z * Y * X) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Z * X * Y) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DECL void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// @} +}//namespace glm + +#include "euler_angles.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/euler_angles.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/euler_angles.inl new file mode 100644 index 000000000000..134d499d91fc --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/euler_angles.inl @@ -0,0 +1,899 @@ +/// @ref gtx_euler_angles + +#include "compatibility.hpp" // glm::atan2 + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleX + ( + T const& angleX + ) + { + T cosX = glm::cos(angleX); + T sinX = glm::sin(angleX); + + return mat<4, 4, T, defaultp>( + T(1), T(0), T(0), T(0), + T(0), cosX, sinX, T(0), + T(0),-sinX, cosX, T(0), + T(0), T(0), T(0), T(1)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleY + ( + T 
const& angleY + ) + { + T cosY = glm::cos(angleY); + T sinY = glm::sin(angleY); + + return mat<4, 4, T, defaultp>( + cosY, T(0), -sinY, T(0), + T(0), T(1), T(0), T(0), + sinY, T(0), cosY, T(0), + T(0), T(0), T(0), T(1)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZ + ( + T const& angleZ + ) + { + T cosZ = glm::cos(angleZ); + T sinZ = glm::sin(angleZ); + + return mat<4, 4, T, defaultp>( + cosZ, sinZ, T(0), T(0), + -sinZ, cosZ, T(0), T(0), + T(0), T(0), T(1), T(0), + T(0), T(0), T(0), T(1)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleX + ( + T const & angleX, + T const & angularVelocityX + ) + { + T cosX = glm::cos(angleX) * angularVelocityX; + T sinX = glm::sin(angleX) * angularVelocityX; + + return mat<4, 4, T, defaultp>( + T(0), T(0), T(0), T(0), + T(0),-sinX, cosX, T(0), + T(0),-cosX,-sinX, T(0), + T(0), T(0), T(0), T(0)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleY + ( + T const & angleY, + T const & angularVelocityY + ) + { + T cosY = glm::cos(angleY) * angularVelocityY; + T sinY = glm::sin(angleY) * angularVelocityY; + + return mat<4, 4, T, defaultp>( + -sinY, T(0), -cosY, T(0), + T(0), T(0), T(0), T(0), + cosY, T(0), -sinY, T(0), + T(0), T(0), T(0), T(0)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleZ + ( + T const & angleZ, + T const & angularVelocityZ + ) + { + T cosZ = glm::cos(angleZ) * angularVelocityZ; + T sinZ = glm::sin(angleZ) * angularVelocityZ; + + return mat<4, 4, T, defaultp>( + -sinZ, cosZ, T(0), T(0), + -cosZ, -sinZ, T(0), T(0), + T(0), T(0), T(0), T(0), + T(0), T(0), T(0), T(0)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXY + ( + T const& angleX, + T const& angleY + ) + { + T cosX = glm::cos(angleX); + T sinX = glm::sin(angleX); + T cosY = glm::cos(angleY); + T sinY = glm::sin(angleY); + + return mat<4, 4, T, defaultp>( + cosY, -sinX * -sinY, cosX * -sinY, T(0), + T(0), cosX, sinX, T(0), + sinY, -sinX * cosY, cosX * cosY, T(0), + T(0), T(0), T(0), T(1)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYX + ( + T const& angleY, + T const& angleX + ) + { + T cosX = glm::cos(angleX); + T sinX = glm::sin(angleX); + T cosY = glm::cos(angleY); + T sinY = glm::sin(angleY); + + return mat<4, 4, T, defaultp>( + cosY, 0, -sinY, T(0), + sinY * sinX, cosX, cosY * sinX, T(0), + sinY * cosX, -sinX, cosY * cosX, T(0), + T(0), T(0), T(0), T(1)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZ + ( + T const& angleX, + T const& angleZ + ) + { + return eulerAngleX(angleX) * eulerAngleZ(angleZ); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZX + ( + T const& angleZ, + T const& angleX + ) + { + return eulerAngleZ(angleZ) * eulerAngleX(angleX); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZ + ( + T const& angleY, + T const& angleZ + ) + { + return eulerAngleY(angleY) * eulerAngleZ(angleZ); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZY + ( + T const& angleZ, + T const& angleY + ) + { + return eulerAngleZ(angleZ) * eulerAngleY(angleY); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYZ + ( + T const& t1, + T const& t2, + T const& t3 + ) + { + T c1 = glm::cos(-t1); + T c2 = glm::cos(-t2); + T c3 = glm::cos(-t3); + T s1 = glm::sin(-t1); + T s2 = glm::sin(-t2); + T s3 = glm::sin(-t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c2 * c3; + Result[0][1] =-c1 * s3 + s1 * s2 * c3; 
+ Result[0][2] = s1 * s3 + c1 * s2 * c3; + Result[0][3] = static_cast(0); + Result[1][0] = c2 * s3; + Result[1][1] = c1 * c3 + s1 * s2 * s3; + Result[1][2] =-s1 * c3 + c1 * s2 * s3; + Result[1][3] = static_cast(0); + Result[2][0] =-s2; + Result[2][1] = s1 * c2; + Result[2][2] = c1 * c2; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYXZ + ( + T const& yaw, + T const& pitch, + T const& roll + ) + { + T tmp_ch = glm::cos(yaw); + T tmp_sh = glm::sin(yaw); + T tmp_cp = glm::cos(pitch); + T tmp_sp = glm::sin(pitch); + T tmp_cb = glm::cos(roll); + T tmp_sb = glm::sin(roll); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb; + Result[0][1] = tmp_sb * tmp_cp; + Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb; + Result[0][3] = static_cast(0); + Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb; + Result[1][1] = tmp_cb * tmp_cp; + Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb; + Result[1][3] = static_cast(0); + Result[2][0] = tmp_sh * tmp_cp; + Result[2][1] = -tmp_sp; + Result[2][2] = tmp_ch * tmp_cp; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZX + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c2; + Result[0][1] = c1 * s2; + Result[0][2] = s1 * s2; + Result[0][3] = static_cast(0); + Result[1][0] =-c3 * s2; + Result[1][1] = c1 * c2 * c3 - s1 * s3; + Result[1][2] = c1 * s3 + c2 * c3 * s1; + Result[1][3] = static_cast(0); + Result[2][0] = s2 * s3; + Result[2][1] =-c3 * s1 - c1 * c2 * s3; + Result[2][2] = c1 * c3 - c2 * s1 * s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYX + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c2; + Result[0][1] = s1 * s2; + Result[0][2] =-c1 * s2; + Result[0][3] = static_cast(0); + Result[1][0] = s2 * s3; + Result[1][1] = c1 * c3 - c2 * s1 * s3; + Result[1][2] = c3 * s1 + c1 * c2 * s3; + Result[1][3] = static_cast(0); + Result[2][0] = c3 * s2; + Result[2][1] =-c1 * s3 - c2 * c3 * s1; + Result[2][2] = c1 * c2 * c3 - s1 * s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYXY + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c3 - c2 * s1 * s3; + Result[0][1] = s2* s3; + Result[0][2] =-c3 * 
s1 - c1 * c2 * s3; + Result[0][3] = static_cast(0); + Result[1][0] = s1 * s2; + Result[1][1] = c2; + Result[1][2] = c1 * s2; + Result[1][3] = static_cast(0); + Result[2][0] = c1 * s3 + c2 * c3 * s1; + Result[2][1] =-c3 * s2; + Result[2][2] = c1 * c2 * c3 - s1 * s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZY + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c2 * c3 - s1 * s3; + Result[0][1] = c3 * s2; + Result[0][2] =-c1 * s3 - c2 * c3 * s1; + Result[0][3] = static_cast(0); + Result[1][0] =-c1 * s2; + Result[1][1] = c2; + Result[1][2] = s1 * s2; + Result[1][3] = static_cast(0); + Result[2][0] = c3 * s1 + c1 * c2 * s3; + Result[2][1] = s2 * s3; + Result[2][2] = c1 * c3 - c2 * s1 * s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZYZ + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c2 * c3 - s1 * s3; + Result[0][1] = c1 * s3 + c2 * c3 * s1; + Result[0][2] =-c3 * s2; + Result[0][3] = static_cast(0); + Result[1][0] =-c3 * s1 - c1 * c2 * s3; + Result[1][1] = c1 * c3 - c2 * s1 * s3; + Result[1][2] = s2 * s3; + Result[1][3] = static_cast(0); + Result[2][0] = c1 * s2; + Result[2][1] = s1 * s2; + Result[2][2] = c2; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZXZ + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c3 - c2 * s1 * s3; + Result[0][1] = c3 * s1 + c1 * c2 * s3; + Result[0][2] = s2 *s3; + Result[0][3] = static_cast(0); + Result[1][0] =-c1 * s3 - c2 * c3 * s1; + Result[1][1] = c1 * c2 * c3 - s1 * s3; + Result[1][2] = c3 * s2; + Result[1][3] = static_cast(0); + Result[2][0] = s1 * s2; + Result[2][1] =-c1 * s2; + Result[2][2] = c2; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZY + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c2 * c3; + Result[0][1] = s1 * s3 + c1 * c3 * s2; + Result[0][2] = c3 * s1 * s2 - c1 * s3; + Result[0][3] = static_cast(0); + Result[1][0] =-s2; + Result[1][1] = c1 * c2; + Result[1][2] = c2 * s1; + Result[1][3] = static_cast(0); + Result[2][0] = c2 * 
s3; + Result[2][1] = c1 * s2 * s3 - c3 * s1; + Result[2][2] = c1 * c3 + s1 * s2 *s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZX + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c2; + Result[0][1] = s2; + Result[0][2] =-c2 * s1; + Result[0][3] = static_cast(0); + Result[1][0] = s1 * s3 - c1 * c3 * s2; + Result[1][1] = c2 * c3; + Result[1][2] = c1 * s3 + c3 * s1 * s2; + Result[1][3] = static_cast(0); + Result[2][0] = c3 * s1 + c1 * s2 * s3; + Result[2][1] =-c2 * s3; + Result[2][2] = c1 * c3 - s1 * s2 * s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZYX + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c2; + Result[0][1] = c2 * s1; + Result[0][2] =-s2; + Result[0][3] = static_cast(0); + Result[1][0] = c1 * s2 * s3 - c3 * s1; + Result[1][1] = c1 * c3 + s1 * s2 * s3; + Result[1][2] = c2 * s3; + Result[1][3] = static_cast(0); + Result[2][0] = s1 * s3 + c1 * c3 * s2; + Result[2][1] = c3 * s1 * s2 - c1 * s3; + Result[2][2] = c2 * c3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZXY + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c3 - s1 * s2 * s3; + Result[0][1] = c3 * s1 + c1 * s2 * s3; + Result[0][2] =-c2 * s3; + Result[0][3] = static_cast(0); + Result[1][0] =-c2 * s1; + Result[1][1] = c1 * c2; + Result[1][2] = s2; + Result[1][3] = static_cast(0); + Result[2][0] = c1 * s3 + c3 * s1 * s2; + Result[2][1] = s1 * s3 - c1 * c3 * s2; + Result[2][2] = c2 * c3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> yawPitchRoll + ( + T const& yaw, + T const& pitch, + T const& roll + ) + { + T tmp_ch = glm::cos(yaw); + T tmp_sh = glm::sin(yaw); + T tmp_cp = glm::cos(pitch); + T tmp_sp = glm::sin(pitch); + T tmp_cb = glm::cos(roll); + T tmp_sb = glm::sin(roll); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb; + Result[0][1] = tmp_sb * tmp_cp; + Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb; + Result[0][3] = static_cast(0); + Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb; + Result[1][1] = tmp_cb * tmp_cp; + Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb; + Result[1][3] = static_cast(0); + Result[2][0] = tmp_sh * 
tmp_cp; + Result[2][1] = -tmp_sp; + Result[2][2] = tmp_ch * tmp_cp; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> orientate2 + ( + T const& angle + ) + { + T c = glm::cos(angle); + T s = glm::sin(angle); + + mat<2, 2, T, defaultp> Result; + Result[0][0] = c; + Result[0][1] = s; + Result[1][0] = -s; + Result[1][1] = c; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> orientate3 + ( + T const& angle + ) + { + T c = glm::cos(angle); + T s = glm::sin(angle); + + mat<3, 3, T, defaultp> Result; + Result[0][0] = c; + Result[0][1] = s; + Result[0][2] = T(0.0); + Result[1][0] = -s; + Result[1][1] = c; + Result[1][2] = T(0.0); + Result[2][0] = T(0.0); + Result[2][1] = T(0.0); + Result[2][2] = T(1.0); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> orientate3 + ( + vec<3, T, Q> const& angles + ) + { + return mat<3, 3, T, Q>(yawPitchRoll(angles.z, angles.x, angles.y)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> orientate4 + ( + vec<3, T, Q> const& angles + ) + { + return yawPitchRoll(angles.z, angles.x, angles.y); + } + + template + GLM_FUNC_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[2][1], M[2][2]); + T C2 = glm::sqrt(M[0][0]*M[0][0] + M[1][0]*M[1][0]); + T T2 = glm::atan2(-M[2][0], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(S1*M[0][2] - C1*M[0][1], C1*M[1][1] - S1*M[1][2 ]); + t1 = -T1; + t2 = -T2; + t3 = -T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[2][0], M[2][2]); + T C2 = glm::sqrt(M[0][1]*M[0][1] + M[1][1]*M[1][1]); + T T2 = glm::atan2(-M[2][1], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(S1*M[1][2] - C1*M[1][0], C1*M[0][0] - S1*M[0][2]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[0][2], M[0][1]); + T S2 = glm::sqrt(M[1][0]*M[1][0] + M[2][0]*M[2][0]); + T T2 = glm::atan2(S2, M[0][0]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(C1*M[1][2] - S1*M[1][1], C1*M[2][2] - S1*M[2][1]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[0][1], -M[0][2]); + T S2 = glm::sqrt(M[1][0]*M[1][0] + M[2][0]*M[2][0]); + T T2 = glm::atan2(S2, M[0][0]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(-C1*M[2][1] - S1*M[2][2], C1*M[1][1] + S1*M[1][2]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[1][0], M[1][2]); + T S2 = glm::sqrt(M[0][1]*M[0][1] + M[2][1]*M[2][1]); + T T2 = glm::atan2(S2, M[1][1]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(C1*M[2][0] - S1*M[2][2], C1*M[0][0] - S1*M[0][2]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[1][2], -M[1][0]); + T S2 = glm::sqrt(M[0][1]*M[0][1] + 
M[2][1]*M[2][1]); + T T2 = glm::atan2(S2, M[1][1]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(-S1*M[0][0] - C1*M[0][2], S1*M[2][0] + C1*M[2][2]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[2][1], M[2][0]); + T S2 = glm::sqrt(M[0][2]*M[0][2] + M[1][2]*M[1][2]); + T T2 = glm::atan2(S2, M[2][2]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(C1*M[0][1] - S1*M[0][0], C1*M[1][1] - S1*M[1][0]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[2][0], -M[2][1]); + T S2 = glm::sqrt(M[0][2]*M[0][2] + M[1][2]*M[1][2]); + T T2 = glm::atan2(S2, M[2][2]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(-C1*M[1][0] - S1*M[1][1], C1*M[0][0] + S1*M[0][1]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[1][2], M[1][1]); + T C2 = glm::sqrt(M[0][0]*M[0][0] + M[2][0]*M[2][0]); + T T2 = glm::atan2(-M[1][0], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(S1*M[0][1] - C1*M[0][2], C1*M[2][2] - S1*M[2][1]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(-M[0][2], M[0][0]); + T C2 = glm::sqrt(M[1][1]*M[1][1] + M[2][1]*M[2][1]); + T T2 = glm::atan2(M[0][1], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(S1*M[1][0] + C1*M[1][2], S1*M[2][0] + C1*M[2][2]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[0][1], M[0][0]); + T C2 = glm::sqrt(M[1][2]*M[1][2] + M[2][2]*M[2][2]); + T T2 = glm::atan2(-M[0][2], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(S1*M[2][0] - C1*M[2][1], C1*M[1][1] - S1*M[1][0]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(-M[1][0], M[1][1]); + T C2 = glm::sqrt(M[0][2]*M[0][2] + M[2][2]*M[2][2]); + T T2 = glm::atan2(M[1][2], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(C1*M[2][0] + S1*M[2][1], C1*M[0][0] + S1*M[0][1]); + t1 = T1; + t2 = T2; + t3 = T3; + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/extend.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/extend.hpp new file mode 100644 index 000000000000..28b7c5c014a4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/extend.hpp @@ -0,0 +1,42 @@ +/// @ref gtx_extend +/// @file glm/gtx/extend.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_extend GLM_GTX_extend +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Extend a position from a source to a position at a defined length. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_extend is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_extend extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_extend + /// @{ + + /// Extends of Length the Origin position using the (Source - Origin) direction. + /// @see gtx_extend + template + GLM_FUNC_DECL genType extend( + genType const& Origin, + genType const& Source, + typename genType::value_type const Length); + + /// @} +}//namespace glm + +#include "extend.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/extend.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/extend.inl new file mode 100644 index 000000000000..32128eb209ac --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/extend.inl @@ -0,0 +1,48 @@ +/// @ref gtx_extend + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType extend + ( + genType const& Origin, + genType const& Source, + genType const& Distance + ) + { + return Origin + (Source - Origin) * Distance; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> extend + ( + vec<2, T, Q> const& Origin, + vec<2, T, Q> const& Source, + T const& Distance + ) + { + return Origin + (Source - Origin) * Distance; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> extend + ( + vec<3, T, Q> const& Origin, + vec<3, T, Q> const& Source, + T const& Distance + ) + { + return Origin + (Source - Origin) * Distance; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> extend + ( + vec<4, T, Q> const& Origin, + vec<4, T, Q> const& Source, + T const& Distance + ) + { + return Origin + (Source - Origin) * Distance; + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/extended_min_max.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/extended_min_max.hpp new file mode 100644 index 000000000000..20cd89b0d519 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/extended_min_max.hpp @@ -0,0 +1,137 @@ +/// @ref gtx_extended_min_max +/// @file glm/gtx/extended_min_max.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_extended_min_max GLM_GTX_extended_min_max +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Min and max functions for 3 to 4 parameters. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../ext/vector_common.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_extended_min_max is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_extended_min_max extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_extended_min_max + /// @{ + + /// Return the minimum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template + GLM_FUNC_DECL T min( + T const& x, + T const& y, + T const& z); + + /// Return the minimum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C min( + C const& x, + typename C::T const& y, + typename C::T const& z); + + /// Return the minimum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C min( + C const& x, + C const& y, + C const& z); + + /// Return the minimum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template + GLM_FUNC_DECL T min( + T const& x, + T const& y, + T const& z, + T const& w); + + /// Return the minimum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C min( + C const& x, + typename C::T const& y, + typename C::T const& z, + typename C::T const& w); + + /// Return the minimum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C min( + C const& x, + C const& y, + C const& z, + C const& w); + + /// Return the maximum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template + GLM_FUNC_DECL T max( + T const& x, + T const& y, + T const& z); + + /// Return the maximum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C max( + C const& x, + typename C::T const& y, + typename C::T const& z); + + /// Return the maximum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C max( + C const& x, + C const& y, + C const& z); + + /// Return the maximum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template + GLM_FUNC_DECL T max( + T const& x, + T const& y, + T const& z, + T const& w); + + /// Return the maximum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C max( + C const& x, + typename C::T const& y, + typename C::T const& z, + typename C::T const& w); + + /// Return the maximum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C max( + C const& x, + C const& y, + C const& z, + C const& w); + + /// @} +}//namespace glm + +#include "extended_min_max.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/extended_min_max.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/extended_min_max.inl new file mode 100644 index 000000000000..de5998fadd65 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/extended_min_max.inl @@ -0,0 +1,138 @@ +/// @ref gtx_extended_min_max + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T min( + T const& x, + T const& y, + T const& z) + { + return glm::min(glm::min(x, y), z); + } + + template class C> + GLM_FUNC_QUALIFIER C min + ( + C const& x, + typename C::T const& y, + typename C::T const& z + ) + { + return glm::min(glm::min(x, y), z); + } + + template class C> + GLM_FUNC_QUALIFIER C min + ( + C const& x, + C const& y, + C const& z + ) + { + return glm::min(glm::min(x, y), z); + } + + template + GLM_FUNC_QUALIFIER T min + ( + T const& x, + T const& y, + T const& z, + T const& w + ) + { + return 
glm::min(glm::min(x, y), glm::min(z, w)); + } + + template class C> + GLM_FUNC_QUALIFIER C min + ( + C const& x, + typename C::T const& y, + typename C::T const& z, + typename C::T const& w + ) + { + return glm::min(glm::min(x, y), glm::min(z, w)); + } + + template class C> + GLM_FUNC_QUALIFIER C min + ( + C const& x, + C const& y, + C const& z, + C const& w + ) + { + return glm::min(glm::min(x, y), glm::min(z, w)); + } + + template + GLM_FUNC_QUALIFIER T max( + T const& x, + T const& y, + T const& z) + { + return glm::max(glm::max(x, y), z); + } + + template class C> + GLM_FUNC_QUALIFIER C max + ( + C const& x, + typename C::T const& y, + typename C::T const& z + ) + { + return glm::max(glm::max(x, y), z); + } + + template class C> + GLM_FUNC_QUALIFIER C max + ( + C const& x, + C const& y, + C const& z + ) + { + return glm::max(glm::max(x, y), z); + } + + template + GLM_FUNC_QUALIFIER T max + ( + T const& x, + T const& y, + T const& z, + T const& w + ) + { + return glm::max(glm::max(x, y), glm::max(z, w)); + } + + template class C> + GLM_FUNC_QUALIFIER C max + ( + C const& x, + typename C::T const& y, + typename C::T const& z, + typename C::T const& w + ) + { + return glm::max(glm::max(x, y), glm::max(z, w)); + } + + template class C> + GLM_FUNC_QUALIFIER C max + ( + C const& x, + C const& y, + C const& z, + C const& w + ) + { + return glm::max(glm::max(x, y), glm::max(z, w)); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/exterior_product.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/exterior_product.hpp new file mode 100644 index 000000000000..7d6c2e19b112 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/exterior_product.hpp @@ -0,0 +1,45 @@ +/// @ref gtx_exterior_product +/// @file glm/gtx/exterior_product.hpp +/// +/// @see core (dependence) +/// @see gtx_exterior_product (dependence) +/// +/// @defgroup gtx_exterior_product GLM_GTX_exterior_product +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// @brief Allow to perform bit operations on integer values + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_exterior_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_exterior_product extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_exterior_product + /// @{ + + /// Returns the cross product of x and y. 
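The 3- and 4-argument overloads implemented just above are plain nestings of the 2-argument glm::min/glm::max, so they also work component-wise on vectors (a call with three vec3 arguments binds the generic T overload with T = vec3). A minimal usage sketch, assuming GLM is on the include path; the sample values are our own illustration:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/extended_min_max.hpp>
    #include <cassert>

    int main() {
        glm::vec3 a(1.0f, 5.0f, 3.0f);
        glm::vec3 b(4.0f, 2.0f, 6.0f);
        glm::vec3 c(0.0f, 9.0f, 2.0f);
        // The 3-argument overload is defined as glm::min(glm::min(a, b), c).
        glm::vec3 lo = glm::min(a, b, c);
        assert(lo == glm::vec3(0.0f, 2.0f, 2.0f)); // component-wise min of all three
        assert(lo == glm::min(glm::min(a, b), c)); // same as nesting by hand
        return 0;
    }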
+ /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see Exterior product + template + GLM_FUNC_DECL GLM_CONSTEXPR T cross(vec<2, T, Q> const& v, vec<2, T, Q> const& u); + + /// @} +} //namespace glm + +#include "exterior_product.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/exterior_product.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/exterior_product.inl new file mode 100644 index 000000000000..690085d6f669 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/exterior_product.inl @@ -0,0 +1,26 @@ +/// @ref gtx_exterior_product + +#include + +namespace glm { +namespace detail +{ + template + struct compute_cross_vec2 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<2, T, Q> const& v, vec<2, T, Q> const& u) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'cross' accepts only floating-point inputs"); + + return v.x * u.y - u.x * v.y; + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T cross(vec<2, T, Q> const& x, vec<2, T, Q> const& y) + { + return detail::compute_cross_vec2::value>::call(x, y); + } +}//namespace glm + diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_exponential.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_exponential.hpp new file mode 100644 index 000000000000..6fb7286528cf --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_exponential.hpp @@ -0,0 +1,95 @@ +/// @ref gtx_fast_exponential +/// @file glm/gtx/fast_exponential.hpp +/// +/// @see core (dependence) +/// @see gtx_half_float (dependence) +/// +/// @defgroup gtx_fast_exponential GLM_GTX_fast_exponential +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Fast but less accurate implementations of exponential based functions. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_fast_exponential is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_fast_exponential extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_fast_exponential + /// @{ + + /// Faster than the common pow function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL genType fastPow(genType x, genType y); + + /// Faster than the common pow function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastPow(vec const& x, vec const& y); + + /// Faster than the common pow function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL genTypeT fastPow(genTypeT x, genTypeU y); + + /// Faster than the common pow function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastPow(vec const& x); + + /// Faster than the common exp function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL T fastExp(T x); + + /// Faster than the common exp function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastExp(vec const& x); + + /// Faster than the common log function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL T fastLog(T x); + + /// Faster than the common exp2 function but less accurate. 
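The 2-D cross defined in exterior_product.inl above returns the scalar v.x * u.y - u.x * v.y, i.e. the z component of the 3-D cross product of (v, 0) and (u, 0), which is twice the signed area spanned by the two vectors. A common use is an orientation (winding) test; a hedged sketch, where the helper name turnsLeft is ours and not part of GLM or this patch:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/exterior_product.hpp>

    // True when walking a -> b -> c turns counter-clockwise: the exterior
    // product of the two edge vectors is positive exactly in that case.
    bool turnsLeft(glm::vec2 a, glm::vec2 b, glm::vec2 c) {
        return glm::cross(b - a, c - b) > 0.0f;
    }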
+ /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastLog(vec const& x); + + /// Faster than the common exp2 function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL T fastExp2(T x); + + /// Faster than the common exp2 function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastExp2(vec const& x); + + /// Faster than the common log2 function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL T fastLog2(T x); + + /// Faster than the common log2 function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastLog2(vec const& x); + + /// @} +}//namespace glm + +#include "fast_exponential.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_exponential.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_exponential.inl new file mode 100644 index 000000000000..5b1174246bbc --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_exponential.inl @@ -0,0 +1,136 @@ +/// @ref gtx_fast_exponential + +namespace glm +{ + // fastPow: + template + GLM_FUNC_QUALIFIER genType fastPow(genType x, genType y) + { + return exp(y * log(x)); + } + + template + GLM_FUNC_QUALIFIER vec fastPow(vec const& x, vec const& y) + { + return exp(y * log(x)); + } + + template + GLM_FUNC_QUALIFIER T fastPow(T x, int y) + { + T f = static_cast(1); + for(int i = 0; i < y; ++i) + f *= x; + return f; + } + + template + GLM_FUNC_QUALIFIER vec fastPow(vec const& x, vec const& y) + { + vec Result; + for(length_t i = 0, n = x.length(); i < n; ++i) + Result[i] = fastPow(x[i], y[i]); + return Result; + } + + // fastExp + // Note: This function provides accurate results only for value between -1 and 1, else avoid it. + template + GLM_FUNC_QUALIFIER T fastExp(T x) + { + // This has a better looking and same performance in release mode than the following code. However, in debug mode it's slower. + // return 1.0f + x * (1.0f + x * 0.5f * (1.0f + x * 0.3333333333f * (1.0f + x * 0.25 * (1.0f + x * 0.2f)))); + T x2 = x * x; + T x3 = x2 * x; + T x4 = x3 * x; + T x5 = x4 * x; + return T(1) + x + (x2 * T(0.5)) + (x3 * T(0.1666666667)) + (x4 * T(0.041666667)) + (x5 * T(0.008333333333)); + } + /* // Try to handle all values of float... but often shower than std::exp, glm::floor and the loop kill the performance + GLM_FUNC_QUALIFIER float fastExp(float x) + { + const float e = 2.718281828f; + const float IntegerPart = floor(x); + const float FloatPart = x - IntegerPart; + float z = 1.f; + + for(int i = 0; i < int(IntegerPart); ++i) + z *= e; + + const float x2 = FloatPart * FloatPart; + const float x3 = x2 * FloatPart; + const float x4 = x3 * FloatPart; + const float x5 = x4 * FloatPart; + return z * (1.0f + FloatPart + (x2 * 0.5f) + (x3 * 0.1666666667f) + (x4 * 0.041666667f) + (x5 * 0.008333333333f)); + } + + // Increase accuracy on number bigger that 1 and smaller than -1 but it's not enough for high and negative numbers + GLM_FUNC_QUALIFIER float fastExp(float x) + { + // This has a better looking and same performance in release mode than the following code. However, in debug mode it's slower. 
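The fastExp defined above is the degree-5 Maclaurin polynomial of e^x, which is why its comment warns that it is only accurate roughly on [-1, 1]: the truncation error grows like x^6/720. A small probe sketch of that behavior (sample points are our own illustration, not values from GLM):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/gtx/fast_exponential.hpp>
    #include <cmath>
    #include <cstdio>

    int main() {
        // Expect tiny relative error near 0, blowing up past |x| ~ 1.
        for (float x : {0.1f, 0.5f, 1.0f, 2.0f, 4.0f}) {
            float approx = glm::fastExp(x);
            float exact  = std::exp(x);
            std::printf("x=%4.1f  fastExp=%11.4f  exp=%11.4f  rel=%+.2e\n",
                        x, approx, exact, (approx - exact) / exact);
        }
        return 0;
    }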
+ // return 1.0f + x * (1.0f + x * 0.5f * (1.0f + x * 0.3333333333f * (1.0f + x * 0.25 * (1.0f + x * 0.2f)))); + float x2 = x * x; + float x3 = x2 * x; + float x4 = x3 * x; + float x5 = x4 * x; + float x6 = x5 * x; + float x7 = x6 * x; + float x8 = x7 * x; + return 1.0f + x + (x2 * 0.5f) + (x3 * 0.1666666667f) + (x4 * 0.041666667f) + (x5 * 0.008333333333f)+ (x6 * 0.00138888888888f) + (x7 * 0.000198412698f) + (x8 * 0.0000248015873f);; + } + */ + + template + GLM_FUNC_QUALIFIER vec fastExp(vec const& x) + { + return detail::functor1::call(fastExp, x); + } + + // fastLog + template + GLM_FUNC_QUALIFIER genType fastLog(genType x) + { + return std::log(x); + } + + /* Slower than the VC7.1 function... + GLM_FUNC_QUALIFIER float fastLog(float x) + { + float y1 = (x - 1.0f) / (x + 1.0f); + float y2 = y1 * y1; + return 2.0f * y1 * (1.0f + y2 * (0.3333333333f + y2 * (0.2f + y2 * 0.1428571429f))); + } + */ + + template + GLM_FUNC_QUALIFIER vec fastLog(vec const& x) + { + return detail::functor1::call(fastLog, x); + } + + //fastExp2, ln2 = 0.69314718055994530941723212145818f + template + GLM_FUNC_QUALIFIER genType fastExp2(genType x) + { + return fastExp(static_cast(0.69314718055994530941723212145818) * x); + } + + template + GLM_FUNC_QUALIFIER vec fastExp2(vec const& x) + { + return detail::functor1::call(fastExp2, x); + } + + // fastLog2, ln2 = 0.69314718055994530941723212145818f + template + GLM_FUNC_QUALIFIER genType fastLog2(genType x) + { + return fastLog(x) / static_cast(0.69314718055994530941723212145818); + } + + template + GLM_FUNC_QUALIFIER vec fastLog2(vec const& x) + { + return detail::functor1::call(fastLog2, x); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_square_root.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_square_root.hpp new file mode 100644 index 000000000000..ac42a9c006cb --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_square_root.hpp @@ -0,0 +1,98 @@ +/// @ref gtx_fast_square_root +/// @file glm/gtx/fast_square_root.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_fast_square_root GLM_GTX_fast_square_root +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Fast but less accurate implementations of square root based functions. +/// - Sqrt optimisation based on Newton's method, +/// www.gamedev.net/community/forums/topic.asp?topic id=139956 + +#pragma once + +// Dependency: +#include "../common.hpp" +#include "../exponential.hpp" +#include "../geometric.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_fast_square_root is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_fast_square_root extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_fast_square_root + /// @{ + + /// Faster than the common sqrt function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL genType fastSqrt(genType x); + + /// Faster than the common sqrt function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL vec fastSqrt(vec const& x); + + /// Faster than the common inversesqrt function but less accurate. + /// + /// @see gtx_fast_square_root extension. 
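fastExp2 and fastLog2 above are change-of-base wrappers around fastExp and fastLog: 2^x = e^(x ln 2) and log2(x) = ln(x) / ln 2, with ln 2 hard-coded as 0.69314718...; note fastLog itself simply forwards to std::log. A minimal sketch, assuming GLM is on the include path:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/gtx/fast_exponential.hpp>
    #include <cmath>
    #include <cstdio>

    int main() {
        float x = 0.75f; // stay inside fastExp's useful range of roughly [-1, 1]
        // fastExp2(x) computes fastExp(x * ln2); compare against std::exp2.
        std::printf("fastExp2(%g) = %g  (std::exp2 = %g)\n",
                    x, glm::fastExp2(x), std::exp2(x));
        // fastLog2(x) is fastLog(x) / ln2.
        std::printf("fastLog2(%g) = %g  (std::log2 = %g)\n",
                    x, glm::fastLog2(x), std::log2(x));
        return 0;
    }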
+ template + GLM_FUNC_DECL genType fastInverseSqrt(genType x); + + /// Faster than the common inversesqrt function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL vec fastInverseSqrt(vec const& x); + + /// Faster than the common length function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL genType fastLength(genType x); + + /// Faster than the common length function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL T fastLength(vec const& x); + + /// Faster than the common distance function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL genType fastDistance(genType x, genType y); + + /// Faster than the common distance function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL T fastDistance(vec const& x, vec const& y); + + /// Faster than the common normalize function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL genType fastNormalize(genType x); + + /// Faster than the common normalize function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL vec fastNormalize(vec const& x); + + /// @} +}// namespace glm + +#include "fast_square_root.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_square_root.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_square_root.inl new file mode 100644 index 000000000000..60fdb7a5ac7c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_square_root.inl @@ -0,0 +1,75 @@ +/// @ref gtx_fast_square_root + +namespace glm +{ + // fastSqrt + template + GLM_FUNC_QUALIFIER genType fastSqrt(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fastSqrt' only accept floating-point input"); + + return genType(1) / fastInverseSqrt(x); + } + + template + GLM_FUNC_QUALIFIER vec fastSqrt(vec const& x) + { + return detail::functor1::call(fastSqrt, x); + } + + // fastInversesqrt + template + GLM_FUNC_QUALIFIER genType fastInverseSqrt(genType x) + { + return detail::compute_inversesqrt<1, genType, lowp, detail::is_aligned::value>::call(vec<1, genType, lowp>(x)).x; + } + + template + GLM_FUNC_QUALIFIER vec fastInverseSqrt(vec const& x) + { + return detail::compute_inversesqrt::value>::call(x); + } + + // fastLength + template + GLM_FUNC_QUALIFIER genType fastLength(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fastLength' only accept floating-point inputs"); + + return abs(x); + } + + template + GLM_FUNC_QUALIFIER T fastLength(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fastLength' only accept floating-point inputs"); + + return fastSqrt(dot(x, x)); + } + + // fastDistance + template + GLM_FUNC_QUALIFIER genType fastDistance(genType x, genType y) + { + return fastLength(y - x); + } + + template + GLM_FUNC_QUALIFIER T fastDistance(vec const& x, vec const& y) + { + return fastLength(y - x); + } + + // fastNormalize + template + GLM_FUNC_QUALIFIER genType fastNormalize(genType x) + { + return x > genType(0) ? 
genType(1) : -genType(1); + } + + template + GLM_FUNC_QUALIFIER vec fastNormalize(vec const& x) + { + return x * fastInverseSqrt(dot(x, x)); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_trigonometry.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_trigonometry.hpp new file mode 100644 index 000000000000..2650d6e4d6e3 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_trigonometry.hpp @@ -0,0 +1,79 @@ +/// @ref gtx_fast_trigonometry +/// @file glm/gtx/fast_trigonometry.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_fast_trigonometry GLM_GTX_fast_trigonometry +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Fast but less accurate implementations of trigonometric functions. + +#pragma once + +// Dependency: +#include "../gtc/constants.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_fast_trigonometry is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_fast_trigonometry extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_fast_trigonometry + /// @{ + + /// Wrap an angle to [0 2pi[ + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T wrapAngle(T angle); + + /// Faster than the common sin function but less accurate. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastSin(T angle); + + /// Faster than the common cos function but less accurate. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastCos(T angle); + + /// Faster than the common tan function but less accurate. + /// Defined between -2pi and 2pi. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastTan(T angle); + + /// Faster than the common asin function but less accurate. + /// Defined between -2pi and 2pi. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastAsin(T angle); + + /// Faster than the common acos function but less accurate. + /// Defined between -2pi and 2pi. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastAcos(T angle); + + /// Faster than the common atan function but less accurate. + /// Defined between -2pi and 2pi. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastAtan(T y, T x); + + /// Faster than the common atan function but less accurate. + /// Defined between -2pi and 2pi. + /// From GLM_GTX_fast_trigonometry extension. 
+ template + GLM_FUNC_DECL T fastAtan(T angle); + + /// @} +}//namespace glm + +#include "fast_trigonometry.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_trigonometry.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_trigonometry.inl new file mode 100644 index 000000000000..1a710cbcd08d --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/fast_trigonometry.inl @@ -0,0 +1,142 @@ +/// @ref gtx_fast_trigonometry + +namespace glm{ +namespace detail +{ + template + GLM_FUNC_QUALIFIER vec taylorCos(vec const& x) + { + return static_cast(1) + - (x * x) * (1.f / 2.f) + + ((x * x) * (x * x)) * (1.f / 24.f) + - (((x * x) * (x * x)) * (x * x)) * (1.f / 720.f) + + (((x * x) * (x * x)) * ((x * x) * (x * x))) * (1.f / 40320.f); + } + + template + GLM_FUNC_QUALIFIER T cos_52s(T x) + { + T const xx(x * x); + return (T(0.9999932946) + xx * (T(-0.4999124376) + xx * (T(0.0414877472) + xx * T(-0.0012712095)))); + } + + template + GLM_FUNC_QUALIFIER vec cos_52s(vec const& x) + { + return detail::functor1::call(cos_52s, x); + } +}//namespace detail + + // wrapAngle + template + GLM_FUNC_QUALIFIER T wrapAngle(T angle) + { + return abs(mod(angle, two_pi())); + } + + template + GLM_FUNC_QUALIFIER vec wrapAngle(vec const& x) + { + return detail::functor1::call(wrapAngle, x); + } + + // cos + template + GLM_FUNC_QUALIFIER T fastCos(T x) + { + T const angle(wrapAngle(x)); + + if(angle < half_pi()) + return detail::cos_52s(angle); + if(angle < pi()) + return -detail::cos_52s(pi() - angle); + if(angle < (T(3) * half_pi())) + return -detail::cos_52s(angle - pi()); + + return detail::cos_52s(two_pi() - angle); + } + + template + GLM_FUNC_QUALIFIER vec fastCos(vec const& x) + { + return detail::functor1::call(fastCos, x); + } + + // sin + template + GLM_FUNC_QUALIFIER T fastSin(T x) + { + return fastCos(half_pi() - x); + } + + template + GLM_FUNC_QUALIFIER vec fastSin(vec const& x) + { + return detail::functor1::call(fastSin, x); + } + + // tan + template + GLM_FUNC_QUALIFIER T fastTan(T x) + { + return x + (x * x * x * T(0.3333333333)) + (x * x * x * x * x * T(0.1333333333333)) + (x * x * x * x * x * x * x * T(0.0539682539)); + } + + template + GLM_FUNC_QUALIFIER vec fastTan(vec const& x) + { + return detail::functor1::call(fastTan, x); + } + + // asin + template + GLM_FUNC_QUALIFIER T fastAsin(T x) + { + return x + (x * x * x * T(0.166666667)) + (x * x * x * x * x * T(0.075)) + (x * x * x * x * x * x * x * T(0.0446428571)) + (x * x * x * x * x * x * x * x * x * T(0.0303819444));// + (x * x * x * x * x * x * x * x * x * x * x * T(0.022372159)); + } + + template + GLM_FUNC_QUALIFIER vec fastAsin(vec const& x) + { + return detail::functor1::call(fastAsin, x); + } + + // acos + template + GLM_FUNC_QUALIFIER T fastAcos(T x) + { + return T(1.5707963267948966192313216916398) - fastAsin(x); //(PI / 2) + } + + template + GLM_FUNC_QUALIFIER vec fastAcos(vec const& x) + { + return detail::functor1::call(fastAcos, x); + } + + // atan + template + GLM_FUNC_QUALIFIER T fastAtan(T y, T x) + { + T sgn = sign(y) * sign(x); + return abs(fastAtan(y / x)) * sgn; + } + + template + GLM_FUNC_QUALIFIER vec fastAtan(vec const& y, vec const& x) + { + return detail::functor2::call(fastAtan, y, x); + } + + template + GLM_FUNC_QUALIFIER T fastAtan(T x) + { + return x - (x * x * x * T(0.333333333333)) + (x * x * x * x * x * T(0.2)) - (x * x * x * x * x * x * x * T(0.1428571429)) + (x * x * x * x * x * x * x * x * x * T(0.111111111111)) - (x * x * x * x * x * x * x * x * x * x * x * T(0.0909090909)); + } + + 
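The fastCos implementation above does classic range reduction: wrapAngle maps the input into [0, 2*pi), and the quadrant folding ensures the cos_52s minimax polynomial only ever sees arguments in [0, pi/2], where its coefficients are fitted; fastSin then reuses it via the identity sin(x) = cos(pi/2 - x). A short sketch comparing it against the standard library (sample angles are our own illustration):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/fast_trigonometry.hpp>
    #include <cmath>
    #include <cstdio>

    int main() {
        // One angle per quadrant, so every folding branch in fastCos is hit.
        for (float x : {0.2f, 1.8f, 3.5f, 5.9f}) {
            std::printf("x=%.1f  fastCos=%+.5f  cos=%+.5f  fastSin=%+.5f  sin=%+.5f\n",
                        x, glm::fastCos(x), std::cos(x),
                        glm::fastSin(x), std::sin(x));
        }
        return 0;
    }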
template + GLM_FUNC_QUALIFIER vec fastAtan(vec const& x) + { + return detail::functor1::call(fastAtan, x); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/float_notmalize.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/float_notmalize.inl new file mode 100644 index 000000000000..8cdbc5aaa9c3 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/float_notmalize.inl @@ -0,0 +1,13 @@ +/// @ref gtx_float_normalize + +#include + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec floatNormalize(vec const& v) + { + return vec(v) / static_cast(std::numeric_limits::max()); + } + +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/functions.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/functions.hpp new file mode 100644 index 000000000000..9f4166c4c1c8 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/functions.hpp @@ -0,0 +1,56 @@ +/// @ref gtx_functions +/// @file glm/gtx/functions.hpp +/// +/// @see core (dependence) +/// @see gtc_quaternion (dependence) +/// +/// @defgroup gtx_functions GLM_GTX_functions +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// List of useful common functions. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/type_vec2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_functions is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_functions extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_functions + /// @{ + + /// 1D gauss function + /// + /// @see gtc_epsilon + template + GLM_FUNC_DECL T gauss( + T x, + T ExpectedValue, + T StandardDeviation); + + /// 2D gauss function + /// + /// @see gtc_epsilon + template + GLM_FUNC_DECL T gauss( + vec<2, T, Q> const& Coord, + vec<2, T, Q> const& ExpectedValue, + vec<2, T, Q> const& StandardDeviation); + + /// @} +}//namespace glm + +#include "functions.inl" + diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/functions.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/functions.inl new file mode 100644 index 000000000000..29cbb20b80fa --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/functions.inl @@ -0,0 +1,30 @@ +/// @ref gtx_functions + +#include "../exponential.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T gauss + ( + T x, + T ExpectedValue, + T StandardDeviation + ) + { + return exp(-((x - ExpectedValue) * (x - ExpectedValue)) / (static_cast(2) * StandardDeviation * StandardDeviation)) / (StandardDeviation * sqrt(static_cast(6.28318530717958647692528676655900576))); + } + + template + GLM_FUNC_QUALIFIER T gauss + ( + vec<2, T, Q> const& Coord, + vec<2, T, Q> const& ExpectedValue, + vec<2, T, Q> const& StandardDeviation + ) + { + vec<2, T, Q> const Squared = ((Coord - ExpectedValue) * (Coord - ExpectedValue)) / (static_cast(2) * StandardDeviation * StandardDeviation); + return exp(-(Squared.x + Squared.y)); + } +}//namespace glm + diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/gradient_paint.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/gradient_paint.hpp new file mode 100644 index 000000000000..6f85bf482d9f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/gradient_paint.hpp @@ -0,0 +1,53 @@ +/// @ref gtx_gradient_paint +/// @file 
glm/gtx/gradient_paint.hpp +/// +/// @see core (dependence) +/// @see gtx_optimum_pow (dependence) +/// +/// @defgroup gtx_gradient_paint GLM_GTX_gradient_paint +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Functions that return the color of procedural gradient for specific coordinates. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtx/optimum_pow.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_gradient_paint is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_gradient_paint extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_gradient_paint + /// @{ + + /// Return a color from a radial gradient. + /// @see - gtx_gradient_paint + template + GLM_FUNC_DECL T radialGradient( + vec<2, T, Q> const& Center, + T const& Radius, + vec<2, T, Q> const& Focal, + vec<2, T, Q> const& Position); + + /// Return a color from a linear gradient. + /// @see - gtx_gradient_paint + template + GLM_FUNC_DECL T linearGradient( + vec<2, T, Q> const& Point0, + vec<2, T, Q> const& Point1, + vec<2, T, Q> const& Position); + + /// @} +}// namespace glm + +#include "gradient_paint.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/gradient_paint.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/gradient_paint.inl new file mode 100644 index 000000000000..4c495e62cbff --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/gradient_paint.inl @@ -0,0 +1,36 @@ +/// @ref gtx_gradient_paint + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T radialGradient + ( + vec<2, T, Q> const& Center, + T const& Radius, + vec<2, T, Q> const& Focal, + vec<2, T, Q> const& Position + ) + { + vec<2, T, Q> F = Focal - Center; + vec<2, T, Q> D = Position - Focal; + T Radius2 = pow2(Radius); + T Fx2 = pow2(F.x); + T Fy2 = pow2(F.y); + + T Numerator = (D.x * F.x + D.y * F.y) + sqrt(Radius2 * (pow2(D.x) + pow2(D.y)) - pow2(D.x * F.y - D.y * F.x)); + T Denominator = Radius2 - (Fx2 + Fy2); + return Numerator / Denominator; + } + + template + GLM_FUNC_QUALIFIER T linearGradient + ( + vec<2, T, Q> const& Point0, + vec<2, T, Q> const& Point1, + vec<2, T, Q> const& Position + ) + { + vec<2, T, Q> Dist = Point1 - Point0; + return (Dist.x * (Position.x - Point0.x) + Dist.y * (Position.y - Point0.y)) / glm::dot(Dist, Dist); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/handed_coordinate_space.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/handed_coordinate_space.hpp new file mode 100644 index 000000000000..e59e3e2ac273 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/handed_coordinate_space.hpp @@ -0,0 +1,50 @@ +/// @ref gtx_handed_coordinate_space +/// @file glm/gtx/handed_coordinate_space.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_handed_coordinate_space GLM_GTX_handed_coordinate_space +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// To know if a set of three basis vectors defines a right or left-handed coordinate system. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_handed_coordinate_space is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_handed_coordinate_space extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_handed_coordinate_space + /// @{ + + //! Return if a trihedron right handed or not. + //! From GLM_GTX_handed_coordinate_space extension. + template + GLM_FUNC_DECL bool rightHanded( + vec<3, T, Q> const& tangent, + vec<3, T, Q> const& binormal, + vec<3, T, Q> const& normal); + + //! Return if a trihedron left handed or not. + //! From GLM_GTX_handed_coordinate_space extension. + template + GLM_FUNC_DECL bool leftHanded( + vec<3, T, Q> const& tangent, + vec<3, T, Q> const& binormal, + vec<3, T, Q> const& normal); + + /// @} +}// namespace glm + +#include "handed_coordinate_space.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/handed_coordinate_space.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/handed_coordinate_space.inl new file mode 100644 index 000000000000..e43c17bd3120 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/handed_coordinate_space.inl @@ -0,0 +1,26 @@ +/// @ref gtx_handed_coordinate_space + +namespace glm +{ + template + GLM_FUNC_QUALIFIER bool rightHanded + ( + vec<3, T, Q> const& tangent, + vec<3, T, Q> const& binormal, + vec<3, T, Q> const& normal + ) + { + return dot(cross(normal, tangent), binormal) > T(0); + } + + template + GLM_FUNC_QUALIFIER bool leftHanded + ( + vec<3, T, Q> const& tangent, + vec<3, T, Q> const& binormal, + vec<3, T, Q> const& normal + ) + { + return dot(cross(normal, tangent), binormal) < T(0); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/hash.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/hash.hpp new file mode 100644 index 000000000000..ef89290b439f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/hash.hpp @@ -0,0 +1,146 @@ +/// @ref gtx_hash +/// @file glm/gtx/hash.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_hash GLM_GTX_hash +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Add std::hash support for glm types + +#pragma once + +#if defined(GLM_FORCE_MESSAGES) && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_hash is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_hash extension included") +# endif +#endif + +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../gtc/vec1.hpp" + +#include "../gtc/quaternion.hpp" +#include "../gtx/dual_quaternion.hpp" + +#include "../mat2x2.hpp" +#include "../mat2x3.hpp" +#include "../mat2x4.hpp" + +#include "../mat3x2.hpp" +#include "../mat3x3.hpp" +#include "../mat3x4.hpp" + +#include "../mat4x2.hpp" +#include "../mat4x3.hpp" +#include "../mat4x4.hpp" + +#if __cplusplus < 201103L +#pragma message("GLM_GTX_hash requires C++11 standard library support") +#endif + +#if GLM_LANG & GLM_LANG_CXX11 +#define GLM_GTX_hash 1 +#include + +namespace std +{ + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::vec<1, T, Q> const& v) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::vec<2, T, Q> const& v) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::vec<3, T, Q> const& v) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::vec<4, T, Q> const& v) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::qua const& q) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::tdualquat const& q) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<2, 2, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<2, 3, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<2, 4, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<3, 2, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<3, 3, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<3, 4, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<4, 2, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<4, 3, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<4, 4, T,Q> const& m) const GLM_NOEXCEPT; + }; +} // namespace std + +#include "hash.inl" + +#endif //GLM_LANG & GLM_LANG_CXX11 diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/hash.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/hash.inl new file mode 100644 index 000000000000..bcadfe53e558 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/hash.inl @@ -0,0 +1,175 @@ +/// @ref gtx_hash + +namespace glm { +namespace detail +{ + GLM_INLINE void hash_combine(size_t &seed, size_t hash) + { + hash += 0x9e3779b9 + (seed << 6) + (seed >> 2); + seed ^= hash; + } +}} + +namespace std +{ + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::vec<1, T, Q> const& v) const GLM_NOEXCEPT + { + hash hasher; + return hasher(v.x); + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::vec<2, T, Q> const& v) const GLM_NOEXCEPT + { + size_t seed = 0; + hash hasher; + glm::detail::hash_combine(seed, hasher(v.x)); + glm::detail::hash_combine(seed, hasher(v.y)); + return seed; 
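The std::hash specializations in this hunk all follow the same pattern: hash each component (or column) with std::hash, then mix the results with detail::hash_combine, whose 0x9e3779b9 constant is the familiar golden-ratio constant also used by boost::hash_combine. The practical effect is that including this header lets GLM types serve as keys in unordered containers; a minimal sketch, where the map contents are our own illustration:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/hash.hpp>
    #include <unordered_map>
    #include <cassert>

    int main() {
        // With <glm/gtx/hash.hpp> included, glm vectors work as unordered keys.
        std::unordered_map<glm::ivec3, int> voxels;
        voxels[glm::ivec3(1, 2, 3)] = 42;
        assert(voxels.count(glm::ivec3(1, 2, 3)) == 1);
        return 0;
    }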
+ } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::vec<3, T, Q> const& v) const GLM_NOEXCEPT + { + size_t seed = 0; + hash hasher; + glm::detail::hash_combine(seed, hasher(v.x)); + glm::detail::hash_combine(seed, hasher(v.y)); + glm::detail::hash_combine(seed, hasher(v.z)); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::vec<4, T, Q> const& v) const GLM_NOEXCEPT + { + size_t seed = 0; + hash hasher; + glm::detail::hash_combine(seed, hasher(v.x)); + glm::detail::hash_combine(seed, hasher(v.y)); + glm::detail::hash_combine(seed, hasher(v.z)); + glm::detail::hash_combine(seed, hasher(v.w)); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::qua const& q) const GLM_NOEXCEPT + { + size_t seed = 0; + hash hasher; + glm::detail::hash_combine(seed, hasher(q.x)); + glm::detail::hash_combine(seed, hasher(q.y)); + glm::detail::hash_combine(seed, hasher(q.z)); + glm::detail::hash_combine(seed, hasher(q.w)); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::tdualquat const& q) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(q.real)); + glm::detail::hash_combine(seed, hasher(q.dual)); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<2, 2, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<2, 3, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<2, 4, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<3, 2, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<3, 3, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<3, 4, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<4, 2, T,Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + glm::detail::hash_combine(seed, hasher(m[3])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<4, 3, T,Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + 
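
For context, a minimal sketch of how the GLM_GTX_hash specializations above are consumed (illustrative, not part of this patch; assumes GLM's usual include layout and a C++11 compiler):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/vec3.hpp>
#include <glm/gtx/hash.hpp>
#include <unordered_map>
#include <cassert>

int main()
{
	// The std::hash specializations above combine per-component hashes via
	// detail::hash_combine, so GLM vectors can key unordered containers directly.
	std::unordered_map<glm::vec3, int> vertexIndex;
	vertexIndex[glm::vec3(0.0f, 1.0f, 0.0f)] = 42;
	assert(vertexIndex.count(glm::vec3(0.0f, 1.0f, 0.0f)) == 1);
	return 0;
}
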
glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + glm::detail::hash_combine(seed, hasher(m[3])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<4, 4, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + glm::detail::hash_combine(seed, hasher(m[3])); + return seed; + } +} diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/integer.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/integer.hpp new file mode 100644 index 000000000000..d0b4c61a3fd4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/integer.hpp @@ -0,0 +1,76 @@ +/// @ref gtx_integer +/// @file glm/gtx/integer.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_integer GLM_GTX_integer +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Add support for integer for core functions + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/integer.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_integer is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_integer extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_integer + /// @{ + + //! Returns x raised to the y power. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL int pow(int x, uint y); + + //! Returns the positive square root of x. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL int sqrt(int x); + + //! Returns the floor log2 of x. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL unsigned int floor_log2(unsigned int x); + + //! Modulus. Returns x - y * floor(x / y) for each component in x using the floating point value y. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL int mod(int x, int y); + + //! Return the factorial value of a number (!12 max, integer only) + //! From GLM_GTX_integer extension. + template + GLM_FUNC_DECL genType factorial(genType const& x); + + //! 32bit signed integer. + //! From GLM_GTX_integer extension. + typedef signed int sint; + + //! Returns x raised to the y power. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL uint pow(uint x, uint y); + + //! Returns the positive square root of x. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL uint sqrt(uint x); + + //! Modulus. Returns x - y * floor(x / y) for each component in x using the floating point value y. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL uint mod(uint x, uint y); + + //! Returns the number of leading zeros. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL uint nlz(uint x); + + /// @} +}//namespace glm + +#include "integer.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/integer.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/integer.inl new file mode 100644 index 000000000000..956366b250f8 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/integer.inl @@ -0,0 +1,185 @@ +/// @ref gtx_integer + +namespace glm +{ + // pow + GLM_FUNC_QUALIFIER int pow(int x, uint y) + { + if(y == 0) + return x >= 0 ? 
1 : -1; + + int result = x; + for(uint i = 1; i < y; ++i) + result *= x; + return result; + } + + // sqrt: From Christopher J. Musial, An integer square root, Graphics Gems, 1990, page 387 + GLM_FUNC_QUALIFIER int sqrt(int x) + { + if(x <= 1) return x; + + int NextTrial = x >> 1; + int CurrentAnswer; + + do + { + CurrentAnswer = NextTrial; + NextTrial = (NextTrial + x / NextTrial) >> 1; + } while(NextTrial < CurrentAnswer); + + return CurrentAnswer; + } + +// Henry Gordon Dietz: http://aggregate.org/MAGIC/ +namespace detail +{ + GLM_FUNC_QUALIFIER unsigned int ones32(unsigned int x) + { + /* 32-bit recursive reduction using SWAR... + but first step is mapping 2-bit values + into sum of 2 1-bit values in sneaky way + */ + x -= ((x >> 1) & 0x55555555); + x = (((x >> 2) & 0x33333333) + (x & 0x33333333)); + x = (((x >> 4) + x) & 0x0f0f0f0f); + x += (x >> 8); + x += (x >> 16); + return(x & 0x0000003f); + } +}//namespace detail + + // Henry Gordon Dietz: http://aggregate.org/MAGIC/ +/* + GLM_FUNC_QUALIFIER unsigned int floor_log2(unsigned int x) + { + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); + + return _detail::ones32(x) >> 1; + } +*/ + // mod + GLM_FUNC_QUALIFIER int mod(int x, int y) + { + return ((x % y) + y) % y; + } + + // factorial (!12 max, integer only) + template + GLM_FUNC_QUALIFIER genType factorial(genType const& x) + { + genType Temp = x; + genType Result; + for(Result = 1; Temp > 1; --Temp) + Result *= Temp; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> factorial( + vec<2, T, Q> const& x) + { + return vec<2, T, Q>( + factorial(x.x), + factorial(x.y)); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> factorial( + vec<3, T, Q> const& x) + { + return vec<3, T, Q>( + factorial(x.x), + factorial(x.y), + factorial(x.z)); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> factorial( + vec<4, T, Q> const& x) + { + return vec<4, T, Q>( + factorial(x.x), + factorial(x.y), + factorial(x.z), + factorial(x.w)); + } + + GLM_FUNC_QUALIFIER uint pow(uint x, uint y) + { + if (y == 0) + return 1u; + + uint result = x; + for(uint i = 1; i < y; ++i) + result *= x; + return result; + } + + GLM_FUNC_QUALIFIER uint sqrt(uint x) + { + if(x <= 1) return x; + + uint NextTrial = x >> 1; + uint CurrentAnswer; + + do + { + CurrentAnswer = NextTrial; + NextTrial = (NextTrial + x / NextTrial) >> 1; + } while(NextTrial < CurrentAnswer); + + return CurrentAnswer; + } + + GLM_FUNC_QUALIFIER uint mod(uint x, uint y) + { + return x - y * (x / y); + } + +#if(GLM_COMPILER & (GLM_COMPILER_VC | GLM_COMPILER_GCC)) + + GLM_FUNC_QUALIFIER unsigned int nlz(unsigned int x) + { + return 31u - findMSB(x); + } + +#else + + // Hackers Delight: http://www.hackersdelight.org/HDcode/nlz.c.txt + GLM_FUNC_QUALIFIER unsigned int nlz(unsigned int x) + { + int y, m, n; + + y = -int(x >> 16); // If left half of x is 0, + m = (y >> 16) & 16; // set n = 16. If left half + n = 16 - m; // is nonzero, set n = 0 and + x = x >> m; // shift x right 16. + // Now x is of the form 0000xxxx. + y = x - 0x100; // If positions 8-15 are 0, + m = (y >> 16) & 8; // add 8 to n and shift x left 8. + n = n + m; + x = x << m; + + y = x - 0x1000; // If positions 12-15 are 0, + m = (y >> 16) & 4; // add 4 to n and shift x left 4. + n = n + m; + x = x << m; + + y = x - 0x4000; // If positions 14-15 are 0, + m = (y >> 16) & 2; // add 2 to n and shift x left 2. + n = n + m; + x = x << m; + + y = x >> 14; // Set y = 0, 1, 2, or 3. + m = y & ~(y >> 1); // Set m = 0, 1, 2, or 2 resp. 
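
The integer routines above have semantics worth spelling out: pow is iterated multiplication, sqrt truncates, and mod is floored rather than C-style. A small behavioral sketch (illustrative, not part of this patch):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/integer.hpp>
#include <cassert>

int main()
{
	assert(glm::pow(2, 10u) == 1024);  // iterated multiplication
	assert(glm::sqrt(10) == 3);        // integer Newton iteration, truncates
	assert(glm::mod(-3, 5) == 2);      // ((x % y) + y) % y stays non-negative for y > 0
	assert(glm::factorial(5) == 120);  // only valid up to 12! in a 32-bit int
	assert(glm::nlz(1u) == 31u);       // number of leading zeros
	return 0;
}
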
+		return unsigned(n + 2 - m);
+	}
+
+#endif//(GLM_COMPILER)
+
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/intersect.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/intersect.hpp
new file mode 100644
index 000000000000..f5c0621deb1a
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/intersect.hpp
@@ -0,0 +1,92 @@
+/// @ref gtx_intersect
+/// @file glm/gtx/intersect.hpp
+///
+/// @see core (dependence)
+/// @see gtx_closest_point (dependence)
+///
+/// @defgroup gtx_intersect GLM_GTX_intersect
+/// @ingroup gtx
+///
+/// Include <glm/gtx/intersect.hpp> to use the features of this extension.
+///
+/// Add intersection functions
+
+#pragma once
+
+// Dependency:
+#include <cfloat>
+#include <limits>
+#include "../glm.hpp"
+#include "../geometric.hpp"
+#include "../gtx/closest_point.hpp"
+#include "../gtx/vector_query.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_intersect is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_intersect extension included")
+# endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_intersect
+	/// @{
+
+	//! Compute the intersection of a ray and a plane.
+	//! Ray direction and plane normal must be unit length.
+	//! From GLM_GTX_intersect extension.
+	template<typename genType>
+	GLM_FUNC_DECL bool intersectRayPlane(
+		genType const& orig, genType const& dir,
+		genType const& planeOrig, genType const& planeNormal,
+		typename genType::value_type & intersectionDistance);
+
+	//! Compute the intersection of a ray and a triangle.
+	/// Based on Tomas Möller's implementation: http://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/raytri/
+	//! From GLM_GTX_intersect extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL bool intersectRayTriangle(
+		vec<3, T, Q> const& orig, vec<3, T, Q> const& dir,
+		vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, vec<3, T, Q> const& v2,
+		vec<2, T, Q>& baryPosition, T& distance);
+
+	//! Compute the intersection of a line and a triangle.
+	//! From GLM_GTX_intersect extension.
+	template<typename genType>
+	GLM_FUNC_DECL bool intersectLineTriangle(
+		genType const& orig, genType const& dir,
+		genType const& vert0, genType const& vert1, genType const& vert2,
+		genType & position);
+
+	//! Compute the intersection distance of a ray and a sphere.
+	//! The ray direction vector is unit length.
+	//! From GLM_GTX_intersect extension.
+	template<typename genType>
+	GLM_FUNC_DECL bool intersectRaySphere(
+		genType const& rayStarting, genType const& rayNormalizedDirection,
+		genType const& sphereCenter, typename genType::value_type const sphereRadiusSquared,
+		typename genType::value_type & intersectionDistance);
+
+	//! Compute the intersection of a ray and a sphere.
+	//! From GLM_GTX_intersect extension.
+	template<typename genType>
+	GLM_FUNC_DECL bool intersectRaySphere(
+		genType const& rayStarting, genType const& rayNormalizedDirection,
+		genType const& sphereCenter, const typename genType::value_type sphereRadius,
+		genType & intersectionPosition, genType & intersectionNormal);
+
+	//! Compute the intersection of a line and a sphere.
+	//!
From GLM_GTX_intersect extension + template + GLM_FUNC_DECL bool intersectLineSphere( + genType const& point0, genType const& point1, + genType const& sphereCenter, typename genType::value_type sphereRadius, + genType & intersectionPosition1, genType & intersectionNormal1, + genType & intersectionPosition2 = genType(), genType & intersectionNormal2 = genType()); + + /// @} +}//namespace glm + +#include "intersect.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/intersect.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/intersect.inl new file mode 100644 index 000000000000..925a903d4a02 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/intersect.inl @@ -0,0 +1,200 @@ +/// @ref gtx_intersect + +namespace glm +{ + template + GLM_FUNC_QUALIFIER bool intersectRayPlane + ( + genType const& orig, genType const& dir, + genType const& planeOrig, genType const& planeNormal, + typename genType::value_type & intersectionDistance + ) + { + typename genType::value_type d = glm::dot(dir, planeNormal); + typename genType::value_type Epsilon = std::numeric_limits::epsilon(); + + if(glm::abs(d) > Epsilon) // if dir and planeNormal are not perpendicular + { + typename genType::value_type const tmp_intersectionDistance = glm::dot(planeOrig - orig, planeNormal) / d; + if (tmp_intersectionDistance > static_cast(0)) { // allow only intersections + intersectionDistance = tmp_intersectionDistance; + return true; + } + } + + return false; + } + + template + GLM_FUNC_QUALIFIER bool intersectRayTriangle + ( + vec<3, T, Q> const& orig, vec<3, T, Q> const& dir, + vec<3, T, Q> const& vert0, vec<3, T, Q> const& vert1, vec<3, T, Q> const& vert2, + vec<2, T, Q>& baryPosition, T& distance + ) + { + // find vectors for two edges sharing vert0 + vec<3, T, Q> const edge1 = vert1 - vert0; + vec<3, T, Q> const edge2 = vert2 - vert0; + + // begin calculating determinant - also used to calculate U parameter + vec<3, T, Q> const p = glm::cross(dir, edge2); + + // if determinant is near zero, ray lies in plane of triangle + T const det = glm::dot(edge1, p); + + vec<3, T, Q> Perpendicular(0); + + if (det > static_cast(0)) + { + // calculate distance from vert0 to ray origin + vec<3, T, Q> const dist = orig - vert0; + + // calculate U parameter and test bounds + baryPosition.x = glm::dot(dist, p); + if(baryPosition.x < static_cast(0) || baryPosition.x > det) + return false; + + // prepare to test V parameter + Perpendicular = glm::cross(dist, edge1); + + // calculate V parameter and test bounds + baryPosition.y = glm::dot(dir, Perpendicular); + if((baryPosition.y < static_cast(0)) || ((baryPosition.x + baryPosition.y) > det)) + return false; + } + else if(det < static_cast(0)) + { + // calculate distance from vert0 to ray origin + vec<3, T, Q> const dist = orig - vert0; + + // calculate U parameter and test bounds + baryPosition.x = glm::dot(dist, p); + if((baryPosition.x > static_cast(0)) || (baryPosition.x < det)) + return false; + + // prepare to test V parameter + Perpendicular = glm::cross(dist, edge1); + + // calculate V parameter and test bounds + baryPosition.y = glm::dot(dir, Perpendicular); + if((baryPosition.y > static_cast(0)) || (baryPosition.x + baryPosition.y < det)) + return false; + } + else + return false; // ray is parallel to the plane of the triangle + + T inv_det = static_cast(1) / det; + + // calculate distance, ray intersects triangle + distance = glm::dot(edge2, Perpendicular) * inv_det; + baryPosition *= inv_det; + + return true; + } + + template + GLM_FUNC_QUALIFIER bool 
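
A worked call of intersectRayTriangle above (illustrative, not part of this patch): a ray fired down -Z from above the unit right triangle in the XY plane hits at barycentric (0.25, 0.25), distance 1.

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/intersect.hpp>
#include <cassert>

int main()
{
	glm::vec3 const orig(0.25f, 0.25f, 1.0f);
	glm::vec3 const dir(0.0f, 0.0f, -1.0f);
	glm::vec3 const v0(0.0f, 0.0f, 0.0f);
	glm::vec3 const v1(1.0f, 0.0f, 0.0f);
	glm::vec3 const v2(0.0f, 1.0f, 0.0f);

	glm::vec2 bary;
	float dist = 0.0f;
	bool const hit = glm::intersectRayTriangle(orig, dir, v0, v1, v2, bary, dist);
	assert(hit && glm::abs(dist - 1.0f) < 1e-5f);
	return 0;
}
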
intersectLineTriangle + ( + genType const& orig, genType const& dir, + genType const& vert0, genType const& vert1, genType const& vert2, + genType & position + ) + { + typename genType::value_type Epsilon = std::numeric_limits::epsilon(); + + genType edge1 = vert1 - vert0; + genType edge2 = vert2 - vert0; + + genType Perpendicular = cross(dir, edge2); + + typename genType::value_type det = dot(edge1, Perpendicular); + + if (det > -Epsilon && det < Epsilon) + return false; + typename genType::value_type inv_det = typename genType::value_type(1) / det; + + genType Tangent = orig - vert0; + + position.y = dot(Tangent, Perpendicular) * inv_det; + if (position.y < typename genType::value_type(0) || position.y > typename genType::value_type(1)) + return false; + + genType Cotangent = cross(Tangent, edge1); + + position.z = dot(dir, Cotangent) * inv_det; + if (position.z < typename genType::value_type(0) || position.y + position.z > typename genType::value_type(1)) + return false; + + position.x = dot(edge2, Cotangent) * inv_det; + + return true; + } + + template + GLM_FUNC_QUALIFIER bool intersectRaySphere + ( + genType const& rayStarting, genType const& rayNormalizedDirection, + genType const& sphereCenter, const typename genType::value_type sphereRadiusSquared, + typename genType::value_type & intersectionDistance + ) + { + typename genType::value_type Epsilon = std::numeric_limits::epsilon(); + genType diff = sphereCenter - rayStarting; + typename genType::value_type t0 = dot(diff, rayNormalizedDirection); + typename genType::value_type dSquared = dot(diff, diff) - t0 * t0; + if( dSquared > sphereRadiusSquared ) + { + return false; + } + typename genType::value_type t1 = sqrt( sphereRadiusSquared - dSquared ); + intersectionDistance = t0 > t1 + Epsilon ? 
t0 - t1 : t0 + t1; + return intersectionDistance > Epsilon; + } + + template + GLM_FUNC_QUALIFIER bool intersectRaySphere + ( + genType const& rayStarting, genType const& rayNormalizedDirection, + genType const& sphereCenter, const typename genType::value_type sphereRadius, + genType & intersectionPosition, genType & intersectionNormal + ) + { + typename genType::value_type distance; + if( intersectRaySphere( rayStarting, rayNormalizedDirection, sphereCenter, sphereRadius * sphereRadius, distance ) ) + { + intersectionPosition = rayStarting + rayNormalizedDirection * distance; + intersectionNormal = (intersectionPosition - sphereCenter) / sphereRadius; + return true; + } + return false; + } + + template + GLM_FUNC_QUALIFIER bool intersectLineSphere + ( + genType const& point0, genType const& point1, + genType const& sphereCenter, typename genType::value_type sphereRadius, + genType & intersectionPoint1, genType & intersectionNormal1, + genType & intersectionPoint2, genType & intersectionNormal2 + ) + { + typename genType::value_type Epsilon = std::numeric_limits::epsilon(); + genType dir = normalize(point1 - point0); + genType diff = sphereCenter - point0; + typename genType::value_type t0 = dot(diff, dir); + typename genType::value_type dSquared = dot(diff, diff) - t0 * t0; + if( dSquared > sphereRadius * sphereRadius ) + { + return false; + } + typename genType::value_type t1 = sqrt( sphereRadius * sphereRadius - dSquared ); + if( t0 < t1 + Epsilon ) + t1 = -t1; + intersectionPoint1 = point0 + dir * (t0 - t1); + intersectionNormal1 = (intersectionPoint1 - sphereCenter) / sphereRadius; + intersectionPoint2 = point0 + dir * (t0 + t1); + intersectionNormal2 = (intersectionPoint2 - sphereCenter) / sphereRadius; + return true; + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/io.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/io.hpp new file mode 100644 index 000000000000..68b5499bfbe7 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/io.hpp @@ -0,0 +1,201 @@ +/// @ref gtx_io +/// @file glm/gtx/io.hpp +/// @author Jan P Springer (regnirpsj@gmail.com) +/// +/// @see core (dependence) +/// @see gtc_matrix_access (dependence) +/// @see gtc_quaternion (dependence) +/// +/// @defgroup gtx_io GLM_GTX_io +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// std::[w]ostream support for glm types +/// +/// std::[w]ostream support for glm types + qualifier/width/etc. manipulators +/// based on howard hinnant's std::chrono io proposal +/// [http://home.roadrunner.com/~hinnant/bloomington/chrono_io.html] + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtx/quaternion.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_io is an experimental extension and may change in the future. 
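
A usage sketch for the sphere tests above (illustrative, not part of this patch); the direction must be normalized, as the declarations require:

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtc/epsilon.hpp>
#include <glm/gtx/intersect.hpp>
#include <cassert>

int main()
{
	glm::vec3 const orig(0.0f, 0.0f, -5.0f);
	glm::vec3 const dir(0.0f, 0.0f, 1.0f); // unit length
	glm::vec3 const center(0.0f, 0.0f, 0.0f);
	float const radius = 1.0f;

	glm::vec3 pos, normal;
	bool const hit = glm::intersectRaySphere(orig, dir, center, radius, pos, normal);
	// The nearest hit is the front of the sphere, one radius before its center.
	assert(hit && glm::all(glm::epsilonEqual(pos, glm::vec3(0.0f, 0.0f, -1.0f), 1e-5f)));
	return 0;
}
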
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_io extension included") +# endif +#endif + +#include // std::basic_ostream<> (fwd) +#include // std::locale, std::locale::facet, std::locale::id +#include // std::pair<> + +namespace glm +{ + /// @addtogroup gtx_io + /// @{ + + namespace io + { + enum order_type { column_major, row_major}; + + template + class format_punct : public std::locale::facet + { + typedef CTy char_type; + + public: + + static std::locale::id id; + + bool formatted; + unsigned precision; + unsigned width; + char_type separator; + char_type delim_left; + char_type delim_right; + char_type space; + char_type newline; + order_type order; + + GLM_FUNC_DECL explicit format_punct(size_t a = 0); + GLM_FUNC_DECL explicit format_punct(format_punct const&); + }; + + template > + class basic_state_saver { + + public: + + GLM_FUNC_DECL explicit basic_state_saver(std::basic_ios&); + GLM_FUNC_DECL ~basic_state_saver(); + + private: + + typedef ::std::basic_ios state_type; + typedef typename state_type::char_type char_type; + typedef ::std::ios_base::fmtflags flags_type; + typedef ::std::streamsize streamsize_type; + typedef ::std::locale const locale_type; + + state_type& state_; + flags_type flags_; + streamsize_type precision_; + streamsize_type width_; + char_type fill_; + locale_type locale_; + + GLM_FUNC_DECL basic_state_saver& operator=(basic_state_saver const&); + }; + + typedef basic_state_saver state_saver; + typedef basic_state_saver wstate_saver; + + template > + class basic_format_saver + { + public: + + GLM_FUNC_DECL explicit basic_format_saver(std::basic_ios&); + GLM_FUNC_DECL ~basic_format_saver(); + + private: + + basic_state_saver const bss_; + + GLM_FUNC_DECL basic_format_saver& operator=(basic_format_saver const&); + }; + + typedef basic_format_saver format_saver; + typedef basic_format_saver wformat_saver; + + struct precision + { + unsigned value; + + GLM_FUNC_DECL explicit precision(unsigned); + }; + + struct width + { + unsigned value; + + GLM_FUNC_DECL explicit width(unsigned); + }; + + template + struct delimeter + { + CTy value[3]; + + GLM_FUNC_DECL explicit delimeter(CTy /* left */, CTy /* right */, CTy /* separator */ = ','); + }; + + struct order + { + order_type value; + + GLM_FUNC_DECL explicit order(order_type); + }; + + // functions, inlined (inline) + + template + FTy const& get_facet(std::basic_ios&); + template + std::basic_ios& formatted(std::basic_ios&); + template + std::basic_ios& unformatted(std::basic_ios&); + + template + std::basic_ostream& operator<<(std::basic_ostream&, precision const&); + template + std::basic_ostream& operator<<(std::basic_ostream&, width const&); + template + std::basic_ostream& operator<<(std::basic_ostream&, delimeter const&); + template + std::basic_ostream& operator<<(std::basic_ostream&, order const&); + }//namespace io + + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, qua const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<1, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<2, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<3, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<4, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 2, T, Q> const&); + template + 
GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 3, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 4, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 2, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 3, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 4, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 2, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 3, T, Q> const&); + template + GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 4, T, Q> const&); + + template + GLM_FUNC_DECL std::basic_ostream & operator<<(std::basic_ostream &, + std::pair const, mat<4, 4, T, Q> const> const&); + + /// @} +}//namespace glm + +#include "io.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/io.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/io.inl new file mode 100644 index 000000000000..a3a1bb6c26b4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/io.inl @@ -0,0 +1,440 @@ +/// @ref gtx_io +/// @author Jan P Springer (regnirpsj@gmail.com) + +#include // std::fixed, std::setfill<>, std::setprecision, std::right, std::setw +#include // std::basic_ostream<> +#include "../gtc/matrix_access.hpp" // glm::col, glm::row +#include "../gtx/type_trait.hpp" // glm::type<> + +namespace glm{ +namespace io +{ + template + GLM_FUNC_QUALIFIER format_punct::format_punct(size_t a) + : std::locale::facet(a) + , formatted(true) + , precision(3) + , width(1 + 4 + 1 + precision) + , separator(',') + , delim_left('[') + , delim_right(']') + , space(' ') + , newline('\n') + , order(column_major) + {} + + template + GLM_FUNC_QUALIFIER format_punct::format_punct(format_punct const& a) + : std::locale::facet(0) + , formatted(a.formatted) + , precision(a.precision) + , width(a.width) + , separator(a.separator) + , delim_left(a.delim_left) + , delim_right(a.delim_right) + , space(a.space) + , newline(a.newline) + , order(a.order) + {} + + template std::locale::id format_punct::id; + + template + GLM_FUNC_QUALIFIER basic_state_saver::basic_state_saver(std::basic_ios& a) + : state_(a) + , flags_(a.flags()) + , precision_(a.precision()) + , width_(a.width()) + , fill_(a.fill()) + , locale_(a.getloc()) + {} + + template + GLM_FUNC_QUALIFIER basic_state_saver::~basic_state_saver() + { + state_.imbue(locale_); + state_.fill(fill_); + state_.width(width_); + state_.precision(precision_); + state_.flags(flags_); + } + + template + GLM_FUNC_QUALIFIER basic_format_saver::basic_format_saver(std::basic_ios& a) + : bss_(a) + { + a.imbue(std::locale(a.getloc(), new format_punct(get_facet >(a)))); + } + + template + GLM_FUNC_QUALIFIER + basic_format_saver::~basic_format_saver() + {} + + GLM_FUNC_QUALIFIER precision::precision(unsigned a) + : value(a) + {} + + GLM_FUNC_QUALIFIER width::width(unsigned a) + : value(a) + {} + + template + GLM_FUNC_QUALIFIER delimeter::delimeter(CTy a, CTy b, CTy c) + : value() + { + value[0] = a; + value[1] = b; + value[2] = c; + } + + GLM_FUNC_QUALIFIER order::order(order_type a) + : value(a) + {} + + template + GLM_FUNC_QUALIFIER FTy const& get_facet(std::basic_ios& ios) + { + if(!std::has_facet(ios.getloc())) + ios.imbue(std::locale(ios.getloc(), new FTy)); + + return std::use_facet(ios.getloc()); + } + + template + GLM_FUNC_QUALIFIER 
std::basic_ios& formatted(std::basic_ios& ios) + { + const_cast&>(get_facet >(ios)).formatted = true; + return ios; + } + + template + GLM_FUNC_QUALIFIER std::basic_ios& unformatted(std::basic_ios& ios) + { + const_cast&>(get_facet >(ios)).formatted = false; + return ios; + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, precision const& a) + { + const_cast&>(get_facet >(os)).precision = a.value; + return os; + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, width const& a) + { + const_cast&>(get_facet >(os)).width = a.value; + return os; + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, delimeter const& a) + { + format_punct & fmt(const_cast&>(get_facet >(os))); + + fmt.delim_left = a.value[0]; + fmt.delim_right = a.value[1]; + fmt.separator = a.value[2]; + + return os; + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, order const& a) + { + const_cast&>(get_facet >(os)).order = a.value; + return os; + } +} // namespace io + +namespace detail +{ + template + GLM_FUNC_QUALIFIER std::basic_ostream& + print_vector_on(std::basic_ostream& os, V const& a) + { + typename std::basic_ostream::sentry const cerberus(os); + + if(cerberus) + { + io::format_punct const& fmt(io::get_facet >(os)); + + length_t const& components(type::components); + + if(fmt.formatted) + { + io::basic_state_saver const bss(os); + + os << std::fixed << std::right << std::setprecision(fmt.precision) << std::setfill(fmt.space) << fmt.delim_left; + + for(length_t i(0); i < components; ++i) + { + os << std::setw(fmt.width) << a[i]; + if(components-1 != i) + os << fmt.separator; + } + + os << fmt.delim_right; + } + else + { + for(length_t i(0); i < components; ++i) + { + os << a[i]; + + if(components-1 != i) + os << fmt.space; + } + } + } + + return os; + } +}//namespace detail + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, qua const& a) + { + return detail::print_vector_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<1, T, Q> const& a) + { + return detail::print_vector_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<2, T, Q> const& a) + { + return detail::print_vector_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<3, T, Q> const& a) + { + return detail::print_vector_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<4, T, Q> const& a) + { + return detail::print_vector_on(os, a); + } + +namespace detail +{ + template class M, length_t C, length_t R, typename T, qualifier Q> + GLM_FUNC_QUALIFIER std::basic_ostream& print_matrix_on(std::basic_ostream& os, M const& a) + { + typename std::basic_ostream::sentry const cerberus(os); + + if(cerberus) + { + io::format_punct const& fmt(io::get_facet >(os)); + + length_t const& cols(type >::cols); + length_t const& rows(type >::rows); + + if(fmt.formatted) + { + os << fmt.newline << fmt.delim_left; + + switch(fmt.order) + { + case io::column_major: + { + for(length_t i(0); i < rows; ++i) + { + if (0 != i) + os << fmt.space; + + os << row(a, i); + + if(rows-1 != i) + os << fmt.newline; + } + } + break; + + case io::row_major: + { + for(length_t i(0); i < cols; ++i) + { + if(0 != i) + os << fmt.space; + + os << column(a, i); + + if(cols-1 != i) + 
os << fmt.newline; + } + } + break; + } + + os << fmt.delim_right; + } + else + { + switch (fmt.order) + { + case io::column_major: + { + for(length_t i(0); i < cols; ++i) + { + os << column(a, i); + + if(cols - 1 != i) + os << fmt.space; + } + } + break; + + case io::row_major: + { + for (length_t i(0); i < rows; ++i) + { + os << row(a, i); + + if (rows-1 != i) + os << fmt.space; + } + } + break; + } + } + } + + return os; + } +}//namespace detail + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 2, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 3, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 4, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<3, 2, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<3, 3, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<3, 4, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 2, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 3, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 4, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + +namespace detail +{ + template class M, length_t C, length_t R, typename T, qualifier Q> + GLM_FUNC_QUALIFIER std::basic_ostream& print_matrix_pair_on(std::basic_ostream& os, std::pair const, M const> const& a) + { + typename std::basic_ostream::sentry const cerberus(os); + + if(cerberus) + { + io::format_punct const& fmt(io::get_facet >(os)); + M const& ml(a.first); + M const& mr(a.second); + length_t const& cols(type >::cols); + length_t const& rows(type >::rows); + + if(fmt.formatted) + { + os << fmt.newline << fmt.delim_left; + + switch(fmt.order) + { + case io::column_major: + { + for(length_t i(0); i < rows; ++i) + { + if(0 != i) + os << fmt.space; + + os << row(ml, i) << ((rows-1 != i) ? fmt.space : fmt.delim_right) << fmt.space << ((0 != i) ? fmt.space : fmt.delim_left) << row(mr, i); + + if(rows-1 != i) + os << fmt.newline; + } + } + break; + case io::row_major: + { + for(length_t i(0); i < cols; ++i) + { + if(0 != i) + os << fmt.space; + + os << column(ml, i) << ((cols-1 != i) ? fmt.space : fmt.delim_right) << fmt.space << ((0 != i) ? 
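
The manipulators above work by installing a format_punct facet on the stream, so formatting choices persist across insertions. A usage sketch (illustrative, not part of this patch):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/io.hpp>
#include <iostream>

int main()
{
	glm::mat2 const m(1.0f, 2.0f,
	                  3.0f, 4.0f);

	// io::precision and io::width mutate the facet imbued on std::cout;
	// the matrix is then printed row by row between the configured delimiters.
	std::cout << glm::io::precision(2) << glm::io::width(8) << m << '\n';
	return 0;
}
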
fmt.space : fmt.delim_left) << column(mr, i);
+
+							if(cols-1 != i)
+								os << fmt.newline;
+						}
+					}
+					break;
+				}
+
+				os << fmt.delim_right;
+			}
+			else
+			{
+				os << ml << fmt.space << mr;
+			}
+		}
+
+		return os;
+	}
+}//namespace detail
+
+	template<typename CTy, typename CTr, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER std::basic_ostream<CTy, CTr>& operator<<(
+		std::basic_ostream<CTy, CTr> & os,
+		std::pair<mat<4, 4, T, Q> const,
+		mat<4, 4, T, Q> const> const& a)
+	{
+		return detail::print_matrix_pair_on<CTy, CTr, mat, 4, 4, T, Q>(os, a);
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/log_base.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/log_base.hpp
new file mode 100644
index 000000000000..ba28c9d7bffc
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/log_base.hpp
@@ -0,0 +1,48 @@
+/// @ref gtx_log_base
+/// @file glm/gtx/log_base.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_log_base GLM_GTX_log_base
+/// @ingroup gtx
+///
+/// Include <glm/gtx/log_base.hpp> to use the features of this extension.
+///
+/// Logarithm for any base. base can be a vector or a scalar.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_log_base is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_log_base extension included")
+# endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_log_base
+	/// @{
+
+	/// Logarithm for any base.
+	/// From GLM_GTX_log_base.
+	template<typename genType>
+	GLM_FUNC_DECL genType log(
+		genType const& x,
+		genType const& base);
+
+	/// Logarithm for any base.
+	/// From GLM_GTX_log_base.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> log(
+		vec<L, T, Q> const& x,
+		vec<L, T, Q> const& base);
+
+	/// @}
+}//namespace glm
+
+#include "log_base.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/log_base.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/log_base.inl
new file mode 100644
index 000000000000..4bbb8e895abb
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/log_base.inl
@@ -0,0 +1,16 @@
+/// @ref gtx_log_base
+
+namespace glm
+{
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType log(genType const& x, genType const& base)
+	{
+		return glm::log(x) / glm::log(base);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, T, Q> log(vec<L, T, Q> const& x, vec<L, T, Q> const& base)
+	{
+		return glm::log(x) / glm::log(base);
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_cross_product.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_cross_product.hpp
new file mode 100644
index 000000000000..1e585f9a4ffe
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_cross_product.hpp
@@ -0,0 +1,47 @@
+/// @ref gtx_matrix_cross_product
+/// @file glm/gtx/matrix_cross_product.hpp
+///
+/// @see core (dependence)
+/// @see gtx_extented_min_max (dependence)
+///
+/// @defgroup gtx_matrix_cross_product GLM_GTX_matrix_cross_product
+/// @ingroup gtx
+///
+/// Include <glm/gtx/matrix_cross_product.hpp> to use the features of this extension.
+///
+/// Build cross product matrices
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_matrix_cross_product is an experimental extension and may change in the future.
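
A quick check of the identity implemented by GLM_GTX_log_base above, log(x, base) = log(x) / log(base) (illustrative, not part of this patch):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/log_base.hpp>
#include <cmath>
#include <cassert>

int main()
{
	// log2(8) == 3; the base may also be a vector, applied component-wise.
	assert(std::abs(glm::log(8.0f, 2.0f) - 3.0f) < 1e-5f);
	return 0;
}
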
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_matrix_cross_product extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_cross_product + /// @{ + + //! Build a cross product matrix. + //! From GLM_GTX_matrix_cross_product extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> matrixCross3( + vec<3, T, Q> const& x); + + //! Build a cross product matrix. + //! From GLM_GTX_matrix_cross_product extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> matrixCross4( + vec<3, T, Q> const& x); + + /// @} +}//namespace glm + +#include "matrix_cross_product.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_cross_product.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_cross_product.inl new file mode 100644 index 000000000000..3a153977cf59 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_cross_product.inl @@ -0,0 +1,37 @@ +/// @ref gtx_matrix_cross_product + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> matrixCross3 + ( + vec<3, T, Q> const& x + ) + { + mat<3, 3, T, Q> Result(T(0)); + Result[0][1] = x.z; + Result[1][0] = -x.z; + Result[0][2] = -x.y; + Result[2][0] = x.y; + Result[1][2] = x.x; + Result[2][1] = -x.x; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> matrixCross4 + ( + vec<3, T, Q> const& x + ) + { + mat<4, 4, T, Q> Result(T(0)); + Result[0][1] = x.z; + Result[1][0] = -x.z; + Result[0][2] = -x.y; + Result[2][0] = x.y; + Result[1][2] = x.x; + Result[2][1] = -x.x; + return Result; + } + +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_decompose.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_decompose.hpp new file mode 100644 index 000000000000..8ab38e6b36a5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_decompose.hpp @@ -0,0 +1,52 @@ +/// @ref gtx_matrix_decompose +/// @file glm/gtx/matrix_decompose.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_matrix_decompose GLM_GTX_matrix_decompose +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Decomposes a model matrix to translations, rotation and scale components + +#pragma once + +// Dependencies +#include "../mat4x4.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../geometric.hpp" +#include "../gtc/quaternion.hpp" +#include "../gtc/matrix_transform.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_matrix_decompose is an experimental extension and may change in the future. 
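
The point of the skew-symmetric matrices built above is that matrixCross3(a) * b equals cross(a, b). A sketch verifying that property (illustrative, not part of this patch):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtc/epsilon.hpp>
#include <glm/gtx/matrix_cross_product.hpp>
#include <cassert>

int main()
{
	glm::vec3 const a(1.0f, 2.0f, 3.0f);
	glm::vec3 const b(-4.0f, 0.5f, 2.0f);
	// Multiplying by the cross-product matrix reproduces the cross product.
	glm::vec3 const viaMatrix = glm::matrixCross3(a) * b;
	assert(glm::all(glm::epsilonEqual(viaMatrix, glm::cross(a, b), 1e-5f)));
	return 0;
}
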
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_matrix_decompose extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_decompose + /// @{ + + /// Decomposes a model matrix to translations, rotation and scale components + /// @see gtx_matrix_decompose + template + GLM_FUNC_DECL bool decompose( + mat<4, 4, T, Q> const& modelMatrix, + vec<3, T, Q> & scale, qua & orientation, vec<3, T, Q> & translation, vec<3, T, Q> & skew, vec<4, T, Q> & perspective); + + // Recomposes a model matrix from a previously-decomposed matrix + template + GLM_FUNC_DECL mat<4, 4, T, Q> recompose( + vec<3, T, Q> const& scale, qua const& orientation, vec<3, T, Q> const& translation, + vec<3, T, Q> const& skew, vec<4, T, Q> const& perspective); + + /// @} +}//namespace glm + +#include "matrix_decompose.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_decompose.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_decompose.inl new file mode 100644 index 000000000000..1b587e2a1b00 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_decompose.inl @@ -0,0 +1,234 @@ +/// @ref gtx_matrix_decompose + +#include "../gtc/constants.hpp" +#include "../gtc/epsilon.hpp" +#include "../gtx/transform.hpp" + +namespace glm{ +namespace detail +{ + /// Make a linear combination of two vectors and return the result. + // result = (a * ascl) + (b * bscl) + template + GLM_FUNC_QUALIFIER vec<3, T, Q> combine( + vec<3, T, Q> const& a, + vec<3, T, Q> const& b, + T ascl, T bscl) + { + return (a * ascl) + (b * bscl); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> scale(vec<3, T, Q> const& v, T desiredLength) + { + return v * desiredLength / length(v); + } +}//namespace detail + + // Matrix decompose + // http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp + // Decomposes the mode matrix to translations,rotation scale components + + template + GLM_FUNC_QUALIFIER bool decompose(mat<4, 4, T, Q> const& ModelMatrix, vec<3, T, Q> & Scale, qua & Orientation, vec<3, T, Q> & Translation, vec<3, T, Q> & Skew, vec<4, T, Q> & Perspective) + { + mat<4, 4, T, Q> LocalMatrix(ModelMatrix); + + // Normalize the matrix. + if(epsilonEqual(LocalMatrix[3][3], static_cast(0), epsilon())) + return false; + + for(length_t i = 0; i < 4; ++i) + for(length_t j = 0; j < 4; ++j) + LocalMatrix[i][j] /= LocalMatrix[3][3]; + + // perspectiveMatrix is used to solve for perspective, but it also provides + // an easy way to test for singularity of the upper 3x3 component. + mat<4, 4, T, Q> PerspectiveMatrix(LocalMatrix); + + for(length_t i = 0; i < 3; i++) + PerspectiveMatrix[i][3] = static_cast(0); + PerspectiveMatrix[3][3] = static_cast(1); + + /// TODO: Fixme! + if(epsilonEqual(determinant(PerspectiveMatrix), static_cast(0), epsilon())) + return false; + + // First, isolate perspective. This is the messiest. + if( + epsilonNotEqual(LocalMatrix[0][3], static_cast(0), epsilon()) || + epsilonNotEqual(LocalMatrix[1][3], static_cast(0), epsilon()) || + epsilonNotEqual(LocalMatrix[2][3], static_cast(0), epsilon())) + { + // rightHandSide is the right hand side of the equation. + vec<4, T, Q> RightHandSide; + RightHandSide[0] = LocalMatrix[0][3]; + RightHandSide[1] = LocalMatrix[1][3]; + RightHandSide[2] = LocalMatrix[2][3]; + RightHandSide[3] = LocalMatrix[3][3]; + + // Solve the equation by inverting PerspectiveMatrix and multiplying + // rightHandSide by the inverse. 
(This is the easiest way, not + // necessarily the best.) + mat<4, 4, T, Q> InversePerspectiveMatrix = glm::inverse(PerspectiveMatrix);// inverse(PerspectiveMatrix, inversePerspectiveMatrix); + mat<4, 4, T, Q> TransposedInversePerspectiveMatrix = glm::transpose(InversePerspectiveMatrix);// transposeMatrix4(inversePerspectiveMatrix, transposedInversePerspectiveMatrix); + + Perspective = TransposedInversePerspectiveMatrix * RightHandSide; + // v4MulPointByMatrix(rightHandSide, transposedInversePerspectiveMatrix, perspectivePoint); + + // Clear the perspective partition + LocalMatrix[0][3] = LocalMatrix[1][3] = LocalMatrix[2][3] = static_cast(0); + LocalMatrix[3][3] = static_cast(1); + } + else + { + // No perspective. + Perspective = vec<4, T, Q>(0, 0, 0, 1); + } + + // Next take care of translation (easy). + Translation = vec<3, T, Q>(LocalMatrix[3]); + LocalMatrix[3] = vec<4, T, Q>(0, 0, 0, LocalMatrix[3].w); + + vec<3, T, Q> Row[3], Pdum3; + + // Now get scale and shear. + for(length_t i = 0; i < 3; ++i) + for(length_t j = 0; j < 3; ++j) + Row[i][j] = LocalMatrix[i][j]; + + // Compute X scale factor and normalize first row. + Scale.x = length(Row[0]);// v3Length(Row[0]); + + Row[0] = detail::scale(Row[0], static_cast(1)); + + // Compute XY shear factor and make 2nd row orthogonal to 1st. + Skew.z = dot(Row[0], Row[1]); + Row[1] = detail::combine(Row[1], Row[0], static_cast(1), -Skew.z); + + // Now, compute Y scale and normalize 2nd row. + Scale.y = length(Row[1]); + Row[1] = detail::scale(Row[1], static_cast(1)); + Skew.z /= Scale.y; + + // Compute XZ and YZ shears, orthogonalize 3rd row. + Skew.y = glm::dot(Row[0], Row[2]); + Row[2] = detail::combine(Row[2], Row[0], static_cast(1), -Skew.y); + Skew.x = glm::dot(Row[1], Row[2]); + Row[2] = detail::combine(Row[2], Row[1], static_cast(1), -Skew.x); + + // Next, get Z scale and normalize 3rd row. + Scale.z = length(Row[2]); + Row[2] = detail::scale(Row[2], static_cast(1)); + Skew.y /= Scale.z; + Skew.x /= Scale.z; + + // At this point, the matrix (in rows[]) is orthonormal. + // Check for a coordinate system flip. If the determinant + // is -1, then negate the matrix and the scaling factors. + Pdum3 = cross(Row[1], Row[2]); // v3Cross(row[1], row[2], Pdum3); + if(dot(Row[0], Pdum3) < 0) + { + for(length_t i = 0; i < 3; i++) + { + Scale[i] *= static_cast(-1); + Row[i] *= static_cast(-1); + } + } + + // Now, get the rotations out, as described in the gem. + + // FIXME - Add the ability to return either quaternions (which are + // easier to recompose with) or Euler angles (rx, ry, rz), which + // are easier for authors to deal with. The latter will only be useful + // when we fix https://bugs.webkit.org/show_bug.cgi?id=23799, so I + // will leave the Euler angle code here for now. 
+ + // ret.rotateY = asin(-Row[0][2]); + // if (cos(ret.rotateY) != 0) { + // ret.rotateX = atan2(Row[1][2], Row[2][2]); + // ret.rotateZ = atan2(Row[0][1], Row[0][0]); + // } else { + // ret.rotateX = atan2(-Row[2][0], Row[1][1]); + // ret.rotateZ = 0; + // } + + int i, j, k = 0; + T root, trace = Row[0].x + Row[1].y + Row[2].z; + if(trace > static_cast(0)) + { + root = sqrt(trace + static_cast(1.0)); + Orientation.w = static_cast(0.5) * root; + root = static_cast(0.5) / root; + Orientation.x = root * (Row[1].z - Row[2].y); + Orientation.y = root * (Row[2].x - Row[0].z); + Orientation.z = root * (Row[0].y - Row[1].x); + } // End if > 0 + else + { + static int Next[3] = {1, 2, 0}; + i = 0; + if(Row[1].y > Row[0].x) i = 1; + if(Row[2].z > Row[i][i]) i = 2; + j = Next[i]; + k = Next[j]; + +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + int off = 1; +# else + int off = 0; +# endif + + root = sqrt(Row[i][i] - Row[j][j] - Row[k][k] + static_cast(1.0)); + + Orientation[i + off] = static_cast(0.5) * root; + root = static_cast(0.5) / root; + Orientation[j + off] = root * (Row[i][j] + Row[j][i]); + Orientation[k + off] = root * (Row[i][k] + Row[k][i]); + Orientation.w = root * (Row[j][k] - Row[k][j]); + } // End if <= 0 + + return true; + } + + // Recomposes a model matrix from a previously-decomposed matrix + // http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp + // https://stackoverflow.com/a/75573092/1047040 + template + GLM_FUNC_DECL mat<4, 4, T, Q> recompose( + vec<3, T, Q> const& scale, qua const& orientation, vec<3, T, Q> const& translation, + vec<3, T, Q> const& skew, vec<4, T, Q> const& perspective) + { + glm::mat4 m = glm::mat4(1.f); + + m[0][3] = perspective.x; + m[1][3] = perspective.y; + m[2][3] = perspective.z; + m[3][3] = perspective.w; + + m *= glm::translate(translation); + m *= glm::mat4_cast(orientation); + + if (abs(skew.x) > static_cast(0)) { + glm::mat4 tmp(1.f); + tmp[2][1] = skew.x; + m *= tmp; + } + + if (abs(skew.y) > static_cast(0)) { + glm::mat4 tmp(1.f); + tmp[2][0] = skew.y; + m *= tmp; + } + + if (abs(skew.z) > static_cast(0)) { + glm::mat4 tmp(1.f); + tmp[1][0] = skew.z; + m *= tmp; + } + + m *= glm::scale(scale); + + return m; + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_factorisation.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_factorisation.hpp new file mode 100644 index 000000000000..5a975d60b6c2 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_factorisation.hpp @@ -0,0 +1,69 @@ +/// @ref gtx_matrix_factorisation +/// @file glm/gtx/matrix_factorisation.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_matrix_factorisation GLM_GTX_matrix_factorisation +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Functions to factor matrices in various forms + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_matrix_factorisation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_matrix_factorisation extension included") +# endif +#endif + +/* +Suggestions: + - Move helper functions flipud and fliplr to another file: They may be helpful in more general circumstances. 
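
A round-trip sketch for the decompose/recompose pair above (illustrative, not part of this patch; the input is a plain TRS matrix, so skew comes back as zero and perspective as (0, 0, 0, 1)):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtx/matrix_decompose.hpp>
#include <glm/gtx/transform.hpp>

int main()
{
	glm::mat4 const model =
		glm::translate(glm::vec3(1.0f, 2.0f, 3.0f)) *
		glm::rotate(glm::radians(30.0f), glm::vec3(0.0f, 1.0f, 0.0f)) *
		glm::scale(glm::vec3(2.0f));

	glm::vec3 scale, translation, skew;
	glm::vec4 perspective;
	glm::quat orientation;
	if (glm::decompose(model, scale, orientation, translation, skew, perspective))
	{
		// recompose applies perspective, translation, rotation, skew and scale
		// in the order decompose assumes, reproducing `model` up to float error.
		glm::mat4 const rebuilt =
			glm::recompose(scale, orientation, translation, skew, perspective);
		(void)rebuilt;
	}
	return 0;
}
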
+ - Implement other types of matrix factorisation, such as QL and LQ, L(D)U, eigendecompositions, etc.
+*/
+
+namespace glm
+{
+	/// @addtogroup gtx_matrix_factorisation
+	/// @{
+
+	/// Flips the matrix rows up and down.
+	///
+	/// From GLM_GTX_matrix_factorisation extension.
+	template <length_t C, length_t R, typename T, qualifier Q>
+	GLM_FUNC_DECL mat<C, R, T, Q> flipud(mat<C, R, T, Q> const& in);
+
+	/// Flips the matrix columns right and left.
+	///
+	/// From GLM_GTX_matrix_factorisation extension.
+	template <length_t C, length_t R, typename T, qualifier Q>
+	GLM_FUNC_DECL mat<C, R, T, Q> fliplr(mat<C, R, T, Q> const& in);
+
+	/// Performs QR factorisation of a matrix.
+	/// Returns two matrices, q and r, such that the columns of q are orthonormal and span the same subspace as those of the input matrix, r is an upper triangular matrix, and q*r=in.
+	/// Given an n-by-m input matrix, q has dimensions min(n,m)-by-m, and r has dimensions n-by-min(n,m).
+	///
+	/// From GLM_GTX_matrix_factorisation extension.
+	template <length_t C, length_t R, typename T, qualifier Q>
+	GLM_FUNC_DECL void qr_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& q, mat<C, (C < R ? C : R), T, Q>& r);
+
+	/// Performs RQ factorisation of a matrix.
+	/// Returns two matrices, r and q, such that r is an upper triangular matrix, the rows of q are orthonormal and span the same subspace as those of the input matrix, and r*q=in.
+	/// Note that in the context of RQ factorisation, the diagonal is seen as starting in the lower-right corner of the matrix, instead of the usual upper-left.
+	/// Given an n-by-m input matrix, r has dimensions min(n,m)-by-m, and q has dimensions n-by-min(n,m).
+	///
+	/// From GLM_GTX_matrix_factorisation extension.
+	template <length_t C, length_t R, typename T, qualifier Q>
+	GLM_FUNC_DECL void rq_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& r, mat<C, (C < R ? C : R), T, Q>& q);
+
+	/// @}
+}
+
+#include "matrix_factorisation.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_factorisation.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_factorisation.inl
new file mode 100644
index 000000000000..6f1683c00780
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_factorisation.inl
@@ -0,0 +1,84 @@
+/// @ref gtx_matrix_factorisation
+
+namespace glm
+{
+	template <length_t C, length_t R, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<C, R, T, Q> flipud(mat<C, R, T, Q> const& in)
+	{
+		mat<R, C, T, Q> tin = transpose(in);
+		tin = fliplr(tin);
+		mat<C, R, T, Q> out = transpose(tin);
+
+		return out;
+	}
+
+	template <length_t C, length_t R, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<C, R, T, Q> fliplr(mat<C, R, T, Q> const& in)
+	{
+		mat<C, R, T, Q> out;
+		for (length_t i = 0; i < C; i++)
+		{
+			out[i] = in[(C - i) - 1];
+		}
+
+		return out;
+	}
+
+	template <length_t C, length_t R, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER void qr_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& q, mat<C, (C < R ? C : R), T, Q>& r)
+	{
+		// Uses the modified Gram-Schmidt method.
+		// Source: https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
+		// And https://en.wikipedia.org/wiki/QR_decomposition
+
+		// For all the linearly independent columns of the input...
+		// (there can be no more linearly independent columns than there are rows.)
+		for (length_t i = 0; i < (C < R ? C : R); i++)
+		{
+			// Copy the input's i-th column into Q.
+			q[i] = in[i];
+
+			// j = [0,i[
+			// Make that column orthogonal to all the previous ones by subtracting
+			// from it its projection onto each of the previous columns.
+			// Also: fill the zero elements of R.
+			for (length_t j = 0; j < i; j++)
+			{
+				q[i] -= dot(q[i], q[j])*q[j];
+				r[j][i] = 0;
+			}
+
+			// Now Q's i-th column is orthogonal to all the previous columns. Normalize it.
+			q[i] = normalize(q[i]);
+
+			// j = [i,C[
+			// Finally, compute the corresponding coefficients of R by projecting
+			// each remaining column of the input onto the resulting column.
+			for (length_t j = i; j < C; j++)
+			{
+				r[j][i] = dot(in[j], q[i]);
+			}
+		}
+	}
+
+	template <length_t C, length_t R, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER void rq_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& r, mat<C, (C < R ? C : R), T, Q>& q)
+	{
+		// From https://en.wikipedia.org/wiki/QR_decomposition:
+		// The RQ decomposition transforms a matrix A into the product of an upper triangular matrix R (also known as right-triangular) and an orthogonal matrix Q. The only difference from QR decomposition is the order of these matrices.
+		// QR decomposition is Gram-Schmidt orthogonalization of the columns of A, starting from the first column.
+		// RQ decomposition is Gram-Schmidt orthogonalization of the rows of A, starting from the last row.
+
+		mat<R, C, T, Q> tin = transpose(in);
+		tin = fliplr(tin);
+
+		mat<R, (C < R ? C : R), T, Q> tr;
+		mat<(C < R ? C : R), C, T, Q> tq;
+		qr_decompose(tin, tq, tr);
+
+		tr = fliplr(tr);
+		r = transpose(tr);
+		r = fliplr(r);
+
+		tq = fliplr(tq);
+		q = transpose(tq);
+	}
+} //namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_interpolation.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_interpolation.hpp
new file mode 100644
index 000000000000..7d5ad4cd9ad9
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_interpolation.hpp
@@ -0,0 +1,60 @@
+/// @ref gtx_matrix_interpolation
+/// @file glm/gtx/matrix_interpolation.hpp
+/// @author Ghenadii Ursachi (the.asteroth@gmail.com)
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_matrix_interpolation GLM_GTX_matrix_interpolation
+/// @ingroup gtx
+///
+/// Include <glm/gtx/matrix_interpolation.hpp> to use the features of this extension.
+///
+/// Allows two matrices to be interpolated directly.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+# ifndef GLM_ENABLE_EXPERIMENTAL
+# pragma message("GLM: GLM_GTX_matrix_interpolation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+# else
+# pragma message("GLM: GLM_GTX_matrix_interpolation extension included")
+# endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_matrix_interpolation
+	/// @{
+
+	/// Get the axis and angle of the rotation from a matrix.
+	/// From GLM_GTX_matrix_interpolation extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL void axisAngle(
+		mat<4, 4, T, Q> const& Mat, vec<3, T, Q> & Axis, T & Angle);
+
+	/// Build a matrix from an axis and an angle.
+	/// From GLM_GTX_matrix_interpolation extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> axisAngleMatrix(
+		vec<3, T, Q> const& Axis, T const Angle);
+
+	/// Extracts the rotation part of a matrix.
+	/// From GLM_GTX_matrix_interpolation extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> extractMatrixRotation(
+		mat<4, 4, T, Q> const& Mat);
+
+	/// Build an interpolation of two 4 * 4 matrices.
+	/// From GLM_GTX_matrix_interpolation extension.
+	/// Warning! This works only with rotation and/or translation matrices; scale will generate unexpected results.
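
A usage sketch for qr_decompose above (illustrative, not part of this patch): for a full-rank square input, q has orthonormal columns, r is upper triangular, and q * r reproduces the input.

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtc/epsilon.hpp>
#include <glm/gtx/matrix_factorisation.hpp>
#include <cassert>

int main()
{
	glm::mat3 const a(2.0f, 0.0f, 1.0f,   // column 0
	                  1.0f, 3.0f, 0.0f,   // column 1
	                  0.0f, 1.0f, 4.0f);  // column 2

	glm::mat3 q, r;
	glm::qr_decompose(a, q, r);

	// Verify the factorisation column by column: q * r == a up to float error.
	glm::mat3 const qr = q * r;
	for (glm::length_t c = 0; c < 3; ++c)
		assert(glm::all(glm::epsilonEqual(qr[c], a[c], 1e-4f)));
	return 0;
}
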
+ template + GLM_FUNC_DECL mat<4, 4, T, Q> interpolate( + mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2, T const Delta); + + /// @} +}//namespace glm + +#include "matrix_interpolation.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_interpolation.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_interpolation.inl new file mode 100644 index 000000000000..f4ba3a6f3ac6 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_interpolation.inl @@ -0,0 +1,146 @@ +/// @ref gtx_matrix_interpolation + +#include "../ext/scalar_constants.hpp" + +#include + +namespace glm +{ + template + GLM_FUNC_QUALIFIER void axisAngle(mat<4, 4, T, Q> const& m, vec<3, T, Q>& axis, T& angle) + { + T const epsilon = + std::numeric_limits::epsilon() * static_cast(1e2); + + bool const nearSymmetrical = + abs(m[1][0] - m[0][1]) < epsilon && + abs(m[2][0] - m[0][2]) < epsilon && + abs(m[2][1] - m[1][2]) < epsilon; + + if(nearSymmetrical) + { + bool const nearIdentity = + abs(m[1][0] + m[0][1]) < epsilon && + abs(m[2][0] + m[0][2]) < epsilon && + abs(m[2][1] + m[1][2]) < epsilon && + abs(m[0][0] + m[1][1] + m[2][2] - T(3.0)) < epsilon; + if (nearIdentity) + { + angle = static_cast(0.0); + axis = vec<3, T, Q>( + static_cast(1.0), static_cast(0.0), static_cast(0.0)); + return; + } + angle = pi(); + T xx = (m[0][0] + static_cast(1.0)) * static_cast(0.5); + T yy = (m[1][1] + static_cast(1.0)) * static_cast(0.5); + T zz = (m[2][2] + static_cast(1.0)) * static_cast(0.5); + T xy = (m[1][0] + m[0][1]) * static_cast(0.25); + T xz = (m[2][0] + m[0][2]) * static_cast(0.25); + T yz = (m[2][1] + m[1][2]) * static_cast(0.25); + if((xx > yy) && (xx > zz)) + { + if(xx < epsilon) + { + axis.x = static_cast(0.0); + axis.y = static_cast(0.7071); + axis.z = static_cast(0.7071); + } + else + { + axis.x = sqrt(xx); + axis.y = xy / axis.x; + axis.z = xz / axis.x; + } + } + else if (yy > zz) + { + if(yy < epsilon) + { + axis.x = static_cast(0.7071); + axis.y = static_cast(0.0); + axis.z = static_cast(0.7071); + } + else + { + axis.y = sqrt(yy); + axis.x = xy / axis.y; + axis.z = yz / axis.y; + } + } + else + { + if (zz < epsilon) + { + axis.x = static_cast(0.7071); + axis.y = static_cast(0.7071); + axis.z = static_cast(0.0); + } + else + { + axis.z = sqrt(zz); + axis.x = xz / axis.z; + axis.y = yz / axis.z; + } + } + return; + } + + T const angleCos = (m[0][0] + m[1][1] + m[2][2] - static_cast(1)) * static_cast(0.5); + if(angleCos >= static_cast(1.0)) + { + angle = static_cast(0.0); + } + else if (angleCos <= static_cast(-1.0)) + { + angle = pi(); + } + else + { + angle = acos(angleCos); + } + + axis = glm::normalize(glm::vec<3, T, Q>( + m[1][2] - m[2][1], m[2][0] - m[0][2], m[0][1] - m[1][0])); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> axisAngleMatrix(vec<3, T, Q> const& axis, T const angle) + { + T c = cos(angle); + T s = sin(angle); + T t = static_cast(1) - c; + vec<3, T, Q> n = normalize(axis); + + return mat<4, 4, T, Q>( + t * n.x * n.x + c, t * n.x * n.y + n.z * s, t * n.x * n.z - n.y * s, static_cast(0.0), + t * n.x * n.y - n.z * s, t * n.y * n.y + c, t * n.y * n.z + n.x * s, static_cast(0.0), + t * n.x * n.z + n.y * s, t * n.y * n.z - n.x * s, t * n.z * n.z + c, static_cast(0.0), + static_cast(0.0), static_cast(0.0), static_cast(0.0), static_cast(1.0)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> extractMatrixRotation(mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + m[0][0], m[0][1], m[0][2], static_cast(0.0), + m[1][0], m[1][1], m[1][2], static_cast(0.0), + 
m[2][0], m[2][1], m[2][2], static_cast(0.0), + static_cast(0.0), static_cast(0.0), static_cast(0.0), static_cast(1.0)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> interpolate(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2, T const delta) + { + mat<4, 4, T, Q> m1rot = extractMatrixRotation(m1); + mat<4, 4, T, Q> dltRotation = m2 * transpose(m1rot); + vec<3, T, Q> dltAxis; + T dltAngle; + axisAngle(dltRotation, dltAxis, dltAngle); + mat<4, 4, T, Q> out = axisAngleMatrix(dltAxis, dltAngle * delta) * m1rot; + out[3][0] = m1[3][0] + delta * (m2[3][0] - m1[3][0]); + out[3][1] = m1[3][1] + delta * (m2[3][1] - m1[3][1]); + out[3][2] = m1[3][2] + delta * (m2[3][2] - m1[3][2]); + return out; + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_major_storage.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_major_storage.hpp new file mode 100644 index 000000000000..8c6bc22d14e9 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_major_storage.hpp @@ -0,0 +1,119 @@ +/// @ref gtx_matrix_major_storage +/// @file glm/gtx/matrix_major_storage.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_matrix_major_storage GLM_GTX_matrix_major_storage +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Build matrices with specific matrix order, row or column + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_matrix_major_storage is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_matrix_major_storage extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_major_storage + /// @{ + + //! Build a row major matrix from row vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<2, 2, T, Q> rowMajor2( + vec<2, T, Q> const& v1, + vec<2, T, Q> const& v2); + + //! Build a row major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<2, 2, T, Q> rowMajor2( + mat<2, 2, T, Q> const& m); + + //! Build a row major matrix from row vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> rowMajor3( + vec<3, T, Q> const& v1, + vec<3, T, Q> const& v2, + vec<3, T, Q> const& v3); + + //! Build a row major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> rowMajor3( + mat<3, 3, T, Q> const& m); + + //! Build a row major matrix from row vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> rowMajor4( + vec<4, T, Q> const& v1, + vec<4, T, Q> const& v2, + vec<4, T, Q> const& v3, + vec<4, T, Q> const& v4); + + //! Build a row major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> rowMajor4( + mat<4, 4, T, Q> const& m); + + //! Build a column major matrix from column vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<2, 2, T, Q> colMajor2( + vec<2, T, Q> const& v1, + vec<2, T, Q> const& v2); + + //! Build a column major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. 
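+	//! A small sketch of the semantics: since GLM matrices are column-major, building a
+	//! "row major" matrix stores the given vectors as rows, i.e. transposed:
+	//! ```
+	//! glm::vec2 r0(1.0f, 2.0f), r1(3.0f, 4.0f);
+	//! glm::mat2 m = glm::rowMajor2(r0, r1);
+	//! // m[0], the first column, is now (1, 3); m[1] is (2, 4).
+	//! ```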
+ template + GLM_FUNC_DECL mat<2, 2, T, Q> colMajor2( + mat<2, 2, T, Q> const& m); + + //! Build a column major matrix from column vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> colMajor3( + vec<3, T, Q> const& v1, + vec<3, T, Q> const& v2, + vec<3, T, Q> const& v3); + + //! Build a column major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> colMajor3( + mat<3, 3, T, Q> const& m); + + //! Build a column major matrix from column vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> colMajor4( + vec<4, T, Q> const& v1, + vec<4, T, Q> const& v2, + vec<4, T, Q> const& v3, + vec<4, T, Q> const& v4); + + //! Build a column major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> colMajor4( + mat<4, 4, T, Q> const& m); + + /// @} +}//namespace glm + +#include "matrix_major_storage.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_major_storage.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_major_storage.inl new file mode 100644 index 000000000000..279dd3433d0b --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_major_storage.inl @@ -0,0 +1,166 @@ +/// @ref gtx_matrix_major_storage + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> rowMajor2 + ( + vec<2, T, Q> const& v1, + vec<2, T, Q> const& v2 + ) + { + mat<2, 2, T, Q> Result; + Result[0][0] = v1.x; + Result[1][0] = v1.y; + Result[0][1] = v2.x; + Result[1][1] = v2.y; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> rowMajor2( + const mat<2, 2, T, Q>& m) + { + mat<2, 2, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rowMajor3( + const vec<3, T, Q>& v1, + const vec<3, T, Q>& v2, + const vec<3, T, Q>& v3) + { + mat<3, 3, T, Q> Result; + Result[0][0] = v1.x; + Result[1][0] = v1.y; + Result[2][0] = v1.z; + Result[0][1] = v2.x; + Result[1][1] = v2.y; + Result[2][1] = v2.z; + Result[0][2] = v3.x; + Result[1][2] = v3.y; + Result[2][2] = v3.z; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rowMajor3( + const mat<3, 3, T, Q>& m) + { + mat<3, 3, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rowMajor4( + const vec<4, T, Q>& v1, + const vec<4, T, Q>& v2, + const vec<4, T, Q>& v3, + const vec<4, T, Q>& v4) + { + mat<4, 4, T, Q> Result; + Result[0][0] = v1.x; + Result[1][0] = v1.y; + Result[2][0] = v1.z; + Result[3][0] = v1.w; + Result[0][1] = v2.x; + Result[1][1] = v2.y; + Result[2][1] = v2.z; + Result[3][1] = v2.w; + Result[0][2] = v3.x; + Result[1][2] = v3.y; + Result[2][2] = v3.z; + Result[3][2] = v3.w; + Result[0][3] = v4.x; + Result[1][3] = v4.y; + Result[2][3] = v4.z; + Result[3][3] = v4.w; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rowMajor4( + const mat<4, 4, T, Q>& m) + { + mat<4, 4, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[0][3] = m[3][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = 
m[2][1]; + Result[1][3] = m[3][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + Result[2][3] = m[3][2]; + Result[3][0] = m[0][3]; + Result[3][1] = m[1][3]; + Result[3][2] = m[2][3]; + Result[3][3] = m[3][3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> colMajor2( + const vec<2, T, Q>& v1, + const vec<2, T, Q>& v2) + { + return mat<2, 2, T, Q>(v1, v2); + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> colMajor2( + const mat<2, 2, T, Q>& m) + { + return mat<2, 2, T, Q>(m); + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> colMajor3( + const vec<3, T, Q>& v1, + const vec<3, T, Q>& v2, + const vec<3, T, Q>& v3) + { + return mat<3, 3, T, Q>(v1, v2, v3); + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> colMajor3( + const mat<3, 3, T, Q>& m) + { + return mat<3, 3, T, Q>(m); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> colMajor4( + const vec<4, T, Q>& v1, + const vec<4, T, Q>& v2, + const vec<4, T, Q>& v3, + const vec<4, T, Q>& v4) + { + return mat<4, 4, T, Q>(v1, v2, v3, v4); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> colMajor4( + const mat<4, 4, T, Q>& m) + { + return mat<4, 4, T, Q>(m); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_operation.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_operation.hpp new file mode 100644 index 000000000000..de6ff1f86f47 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_operation.hpp @@ -0,0 +1,103 @@ +/// @ref gtx_matrix_operation +/// @file glm/gtx/matrix_operation.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_matrix_operation GLM_GTX_matrix_operation +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Build diagonal matrices from vectors. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_matrix_operation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_matrix_operation extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_operation + /// @{ + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<2, 2, T, Q> diagonal2x2( + vec<2, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<2, 3, T, Q> diagonal2x3( + vec<2, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<2, 4, T, Q> diagonal2x4( + vec<2, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<3, 2, T, Q> diagonal3x2( + vec<2, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> diagonal3x3( + vec<3, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<3, 4, T, Q> diagonal3x4( + vec<3, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<4, 2, T, Q> diagonal4x2( + vec<2, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. 
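+	//! A small sketch of the intended use (values are illustrative):
+	//! ```
+	//! glm::mat3 s = glm::diagonal3x3(glm::vec3(2.0f, 3.0f, 4.0f));
+	//! // s scales x by 2, y by 3 and z by 4; all off-diagonal entries are 0.
+	//! ```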
+ template + GLM_FUNC_DECL mat<4, 3, T, Q> diagonal4x3( + vec<3, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> diagonal4x4( + vec<4, T, Q> const& v); + + /// Build an adjugate matrix. + /// From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<2, 2, T, Q> adjugate(mat<2, 2, T, Q> const& m); + + /// Build an adjugate matrix. + /// From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> adjugate(mat<3, 3, T, Q> const& m); + + /// Build an adjugate matrix. + /// From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> adjugate(mat<4, 4, T, Q> const& m); + + /// @} +}//namespace glm + +#include "matrix_operation.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_operation.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_operation.inl new file mode 100644 index 000000000000..a4f4a850002f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_operation.inl @@ -0,0 +1,176 @@ +/// @ref gtx_matrix_operation + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> diagonal2x2 + ( + vec<2, T, Q> const& v + ) + { + mat<2, 2, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 3, T, Q> diagonal2x3 + ( + vec<2, T, Q> const& v + ) + { + mat<2, 3, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 4, T, Q> diagonal2x4 + ( + vec<2, T, Q> const& v + ) + { + mat<2, 4, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 2, T, Q> diagonal3x2 + ( + vec<2, T, Q> const& v + ) + { + mat<3, 2, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> diagonal3x3 + ( + vec<3, T, Q> const& v + ) + { + mat<3, 3, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + Result[2][2] = v[2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 4, T, Q> diagonal3x4 + ( + vec<3, T, Q> const& v + ) + { + mat<3, 4, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + Result[2][2] = v[2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> diagonal4x4 + ( + vec<4, T, Q> const& v + ) + { + mat<4, 4, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + Result[2][2] = v[2]; + Result[3][3] = v[3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 3, T, Q> diagonal4x3 + ( + vec<3, T, Q> const& v + ) + { + mat<4, 3, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + Result[2][2] = v[2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 2, T, Q> diagonal4x2 + ( + vec<2, T, Q> const& v + ) + { + mat<4, 2, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> adjugate(mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + +m[1][1], -m[0][1], + -m[1][0], +m[0][0]); + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> adjugate(mat<3, 3, T, Q> const& m) + { + T const m00 = determinant(mat<2, 2, T, Q>(m[1][1], m[2][1], m[1][2], m[2][2])); + T const m01 = determinant(mat<2, 2, T, Q>(m[0][1], m[2][1], m[0][2], m[2][2])); + T const m02 = 
determinant(mat<2, 2, T, Q>(m[0][1], m[1][1], m[0][2], m[1][2])); + + T const m10 = determinant(mat<2, 2, T, Q>(m[1][0], m[2][0], m[1][2], m[2][2])); + T const m11 = determinant(mat<2, 2, T, Q>(m[0][0], m[2][0], m[0][2], m[2][2])); + T const m12 = determinant(mat<2, 2, T, Q>(m[0][0], m[1][0], m[0][2], m[1][2])); + + T const m20 = determinant(mat<2, 2, T, Q>(m[1][0], m[2][0], m[1][1], m[2][1])); + T const m21 = determinant(mat<2, 2, T, Q>(m[0][0], m[2][0], m[0][1], m[2][1])); + T const m22 = determinant(mat<2, 2, T, Q>(m[0][0], m[1][0], m[0][1], m[1][1])); + + return mat<3, 3, T, Q>( + +m00, -m01, +m02, + -m10, +m11, -m12, + +m20, -m21, +m22); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> adjugate(mat<4, 4, T, Q> const& m) + { + T const m00 = determinant(mat<3, 3, T, Q>(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3])); + T const m01 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3])); + T const m02 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3])); + T const m03 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2])); + + T const m10 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3])); + T const m11 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3])); + T const m12 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3])); + T const m13 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2])); + + T const m20 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3])); + T const m21 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3])); + T const m22 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3])); + T const m23 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2])); + + T const m30 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3])); + T const m31 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3])); + T const m32 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3])); + T const m33 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2])); + + return mat<4, 4, T, Q>( + +m00, -m10, +m20, -m30, + -m01, +m11, -m21, +m31, + +m02, -m12, +m22, -m32, + -m03, +m13, -m23, +m33); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_query.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_query.hpp new file mode 100644 index 000000000000..8011b2b1d469 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_query.hpp @@ -0,0 +1,77 @@ +/// @ref gtx_matrix_query +/// @file glm/gtx/matrix_query.hpp +/// +/// @see core (dependence) +/// @see gtx_vector_query (dependence) +/// +/// @defgroup gtx_matrix_query GLM_GTX_matrix_query +/// @ingroup gtx +/// +/// Include to use the features of this extension. 
+///
+/// Query to evaluate matrix properties
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtx/vector_query.hpp"
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_matrix_query is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_matrix_query extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_matrix_query
+	/// @{
+
+	/// Return whether a matrix is a null matrix.
+	/// From GLM_GTX_matrix_query extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL bool isNull(mat<2, 2, T, Q> const& m, T const& epsilon);
+
+	/// Return whether a matrix is a null matrix.
+	/// From GLM_GTX_matrix_query extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL bool isNull(mat<3, 3, T, Q> const& m, T const& epsilon);
+
+	/// Return whether a matrix is a null matrix.
+	/// From GLM_GTX_matrix_query extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL bool isNull(mat<4, 4, T, Q> const& m, T const& epsilon);
+
+	/// Return whether a matrix is an identity matrix.
+	/// From GLM_GTX_matrix_query extension.
+	template<length_t C, length_t R, typename T, qualifier Q, template<length_t, length_t, typename, qualifier> class matType>
+	GLM_FUNC_DECL bool isIdentity(matType<C, R, T, Q> const& m, T const& epsilon);
+
+	/// Return whether a matrix is a normalized matrix.
+	/// From GLM_GTX_matrix_query extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL bool isNormalized(mat<2, 2, T, Q> const& m, T const& epsilon);
+
+	/// Return whether a matrix is a normalized matrix.
+	/// From GLM_GTX_matrix_query extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL bool isNormalized(mat<3, 3, T, Q> const& m, T const& epsilon);
+
+	/// Return whether a matrix is a normalized matrix.
+	/// From GLM_GTX_matrix_query extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL bool isNormalized(mat<4, 4, T, Q> const& m, T const& epsilon);
+
+	/// Return whether a matrix is an orthonormalized matrix.
+	/// From GLM_GTX_matrix_query extension.
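+	/// A sketch of the expected behaviour (rotate comes from <glm/gtc/matrix_transform.hpp>;
+	/// the epsilon is an illustrative tolerance):
+	/// ```
+	/// glm::mat3 rot = glm::mat3(glm::rotate(glm::mat4(1.0f), 0.3f, glm::vec3(0.0f, 0.0f, 1.0f)));
+	/// bool a = glm::isOrthogonal(rot, 0.0001f);        // true: unit, mutually orthogonal columns
+	/// bool b = glm::isOrthogonal(rot * 2.0f, 0.0001f); // false: columns are no longer unit length
+	/// ```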
+ template class matType> + GLM_FUNC_DECL bool isOrthogonal(matType const& m, T const& epsilon); + + /// @} +}//namespace glm + +#include "matrix_query.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_query.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_query.inl new file mode 100644 index 000000000000..dc3ec8453b68 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_query.inl @@ -0,0 +1,119 @@ +/// @ref gtx_matrix_query + +namespace glm +{ + template + GLM_FUNC_QUALIFIER bool isNull(mat<2, 2, T, Q> const& m, T const& epsilon) + { + bool result = true; + for(length_t i = 0; result && i < m.length() ; ++i) + result = isNull(m[i], epsilon); + return result; + } + + template + GLM_FUNC_QUALIFIER bool isNull(mat<3, 3, T, Q> const& m, T const& epsilon) + { + bool result = true; + for(length_t i = 0; result && i < m.length() ; ++i) + result = isNull(m[i], epsilon); + return result; + } + + template + GLM_FUNC_QUALIFIER bool isNull(mat<4, 4, T, Q> const& m, T const& epsilon) + { + bool result = true; + for(length_t i = 0; result && i < m.length() ; ++i) + result = isNull(m[i], epsilon); + return result; + } + + template + GLM_FUNC_QUALIFIER bool isIdentity(mat const& m, T const& epsilon) + { + bool result = true; + for(length_t i = 0; result && i < m.length(); ++i) + { + for(length_t j = 0; result && j < glm::min(i, m[0].length()); ++j) + result = abs(m[i][j]) <= epsilon; + if(result && i < m[0].length()) + result = abs(m[i][i] - 1) <= epsilon; + for(length_t j = i + 1; result && j < m[0].length(); ++j) + result = abs(m[i][j]) <= epsilon; + } + return result; + } + + template + GLM_FUNC_QUALIFIER bool isNormalized(mat<2, 2, T, Q> const& m, T const& epsilon) + { + bool result(true); + for(length_t i = 0; result && i < m.length(); ++i) + result = isNormalized(m[i], epsilon); + for(length_t i = 0; result && i < m.length(); ++i) + { + typename mat<2, 2, T, Q>::col_type v; + for(length_t j = 0; j < m.length(); ++j) + v[j] = m[j][i]; + result = isNormalized(v, epsilon); + } + return result; + } + + template + GLM_FUNC_QUALIFIER bool isNormalized(mat<3, 3, T, Q> const& m, T const& epsilon) + { + bool result(true); + for(length_t i = 0; result && i < m.length(); ++i) + result = isNormalized(m[i], epsilon); + for(length_t i = 0; result && i < m.length(); ++i) + { + typename mat<3, 3, T, Q>::col_type v; + for(length_t j = 0; j < m.length(); ++j) + v[j] = m[j][i]; + result = isNormalized(v, epsilon); + } + return result; + } + + template + GLM_FUNC_QUALIFIER bool isNormalized(mat<4, 4, T, Q> const& m, T const& epsilon) + { + bool result(true); + for(length_t i = 0; result && i < m.length(); ++i) + result = isNormalized(m[i], epsilon); + for(length_t i = 0; result && i < m.length(); ++i) + { + typename mat<4, 4, T, Q>::col_type v; + for(length_t j = 0; j < m.length(); ++j) + v[j] = m[j][i]; + result = isNormalized(v, epsilon); + } + return result; + } + + template + GLM_FUNC_QUALIFIER bool isOrthogonal(mat const& m, T const& epsilon) + { + bool result = true; + for(length_t i(0); result && i < m.length(); ++i) + { + result = isNormalized(m[i], epsilon); + for(length_t j(i + 1); result && j < m.length(); ++j) + result = abs(dot(m[i], m[j])) <= epsilon; + } + + if(result) + { + mat tmp = transpose(m); + for(length_t i(0); result && i < m.length(); ++i) + { + result = isNormalized(tmp[i], epsilon); + for(length_t j(i + 1); result && j < m.length(); ++j) + result = abs(dot(tmp[i], tmp[j])) <= epsilon; + } + } + return result; + } +}//namespace glm diff --git 
a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_transform_2d.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_transform_2d.hpp new file mode 100644 index 000000000000..5f9c54021851 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_transform_2d.hpp @@ -0,0 +1,81 @@ +/// @ref gtx_matrix_transform_2d +/// @file glm/gtx/matrix_transform_2d.hpp +/// @author Miguel Ángel Pérez Martínez +/// +/// @see core (dependence) +/// +/// @defgroup gtx_matrix_transform_2d GLM_GTX_matrix_transform_2d +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Defines functions that generate common 2d transformation matrices. + +#pragma once + +// Dependency: +#include "../mat3x3.hpp" +#include "../vec2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_matrix_transform_2d is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_matrix_transform_2d extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_transform_2d + /// @{ + + /// Builds a translation 3 * 3 matrix created from a vector of 2 components. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param v Coordinates of a translation vector. + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> translate( + mat<3, 3, T, Q> const& m, + vec<2, T, Q> const& v); + + /// Builds a rotation 3 * 3 matrix created from an angle. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param angle Rotation angle expressed in radians. + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rotate( + mat<3, 3, T, Q> const& m, + T angle); + + /// Builds a scale 3 * 3 matrix created from a vector of 2 components. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param v Coordinates of a scale vector. + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> scale( + mat<3, 3, T, Q> const& m, + vec<2, T, Q> const& v); + + /// Builds an horizontal (parallel to the x axis) shear 3 * 3 matrix. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param y Shear factor. + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX( + mat<3, 3, T, Q> const& m, + T y); + + /// Builds a vertical (parallel to the y axis) shear 3 * 3 matrix. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param x Shear factor. 
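+	///
+	/// A composition sketch for this extension as a whole (values are illustrative):
+	/// ```
+	/// glm::mat3 m(1.0f);
+	/// m = glm::translate(m, glm::vec2(10.0f, 0.0f));
+	/// m = glm::rotate(m, glm::radians(90.0f));
+	/// m = glm::scale(m, glm::vec2(2.0f, 2.0f));
+	/// glm::vec3 p = m * glm::vec3(1.0f, 0.0f, 1.0f); // homogeneous 2D point; p is roughly (10, 2, 1)
+	/// ```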
+ template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY( + mat<3, 3, T, Q> const& m, + T x); + + /// @} +}//namespace glm + +#include "matrix_transform_2d.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_transform_2d.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_transform_2d.inl new file mode 100644 index 000000000000..a68d24dc9825 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/matrix_transform_2d.inl @@ -0,0 +1,68 @@ +/// @ref gtx_matrix_transform_2d +/// @author Miguel Ángel Pérez Martínez + +#include "../trigonometric.hpp" + +namespace glm +{ + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> translate( + mat<3, 3, T, Q> const& m, + vec<2, T, Q> const& v) + { + mat<3, 3, T, Q> Result(m); + Result[2] = m[0] * v[0] + m[1] * v[1] + m[2]; + return Result; + } + + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rotate( + mat<3, 3, T, Q> const& m, + T angle) + { + T const a = angle; + T const c = cos(a); + T const s = sin(a); + + mat<3, 3, T, Q> Result; + Result[0] = m[0] * c + m[1] * s; + Result[1] = m[0] * -s + m[1] * c; + Result[2] = m[2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> scale( + mat<3, 3, T, Q> const& m, + vec<2, T, Q> const& v) + { + mat<3, 3, T, Q> Result; + Result[0] = m[0] * v[0]; + Result[1] = m[1] * v[1]; + Result[2] = m[2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX( + mat<3, 3, T, Q> const& m, + T y) + { + mat<3, 3, T, Q> Result(1); + Result[0][1] = y; + return m * Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY( + mat<3, 3, T, Q> const& m, + T x) + { + mat<3, 3, T, Q> Result(1); + Result[1][0] = x; + return m * Result; + } + +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/mixed_product.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/mixed_product.hpp new file mode 100644 index 000000000000..b242e357e57a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/mixed_product.hpp @@ -0,0 +1,41 @@ +/// @ref gtx_mixed_product +/// @file glm/gtx/mixed_product.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_mixed_product GLM_GTX_mixed_producte +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Mixed product of 3 vectors. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_mixed_product is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_mixed_product extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_mixed_product + /// @{ + + /// @brief Mixed product of 3 vectors (from GLM_GTX_mixed_product extension) + template + GLM_FUNC_DECL T mixedProduct( + vec<3, T, Q> const& v1, + vec<3, T, Q> const& v2, + vec<3, T, Q> const& v3); + + /// @} +}// namespace glm + +#include "mixed_product.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/mixed_product.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/mixed_product.inl new file mode 100644 index 000000000000..e5cdbdb49a2b --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/mixed_product.inl @@ -0,0 +1,15 @@ +/// @ref gtx_mixed_product + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T mixedProduct + ( + vec<3, T, Q> const& v1, + vec<3, T, Q> const& v2, + vec<3, T, Q> const& v3 + ) + { + return dot(cross(v1, v2), v3); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/norm.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/norm.hpp new file mode 100644 index 000000000000..dfaebb7a8be2 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/norm.hpp @@ -0,0 +1,88 @@ +/// @ref gtx_norm +/// @file glm/gtx/norm.hpp +/// +/// @see core (dependence) +/// @see gtx_quaternion (dependence) +/// @see gtx_component_wise (dependence) +/// +/// @defgroup gtx_norm GLM_GTX_norm +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Various ways to compute vector norms. + +#pragma once + +// Dependency: +#include "../geometric.hpp" +#include "../gtx/quaternion.hpp" +#include "../gtx/component_wise.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_norm is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_norm extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_norm + /// @{ + + /// Returns the squared length of x. + /// From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T length2(vec const& x); + + /// Returns the squared distance between p0 and p1, i.e., length2(p0 - p1). + /// From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T distance2(vec const& p0, vec const& p1); + + //! Returns the L1 norm between x and y. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T l1Norm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); + + //! Returns the L1 norm of v. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T l1Norm(vec<3, T, Q> const& v); + + //! Returns the L2 norm between x and y. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T l2Norm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); + + //! Returns the L2 norm of v. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T l2Norm(vec<3, T, Q> const& x); + + //! Returns the L norm between x and y. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T lxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y, unsigned int Depth); + + //! Returns the L norm of v. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T lxNorm(vec<3, T, Q> const& x, unsigned int Depth); + + //! Returns the LMax norm between x and y. + //! From GLM_GTX_norm extension. 
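+	//! A small worked sketch: with Depth 1 and 2, lxNorm reduces to the L1 and L2 norms.
+	//! ```
+	//! glm::vec3 v(3.0f, 4.0f, 0.0f);
+	//! float manhattan = glm::l1Norm(v);     // 7
+	//! float euclidean = glm::l2Norm(v);     // 5
+	//! float minkowski = glm::lxNorm(v, 2u); // also 5, the Depth-2 (Euclidean) case
+	//! ```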
+ template + GLM_FUNC_DECL T lMaxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); + + //! Returns the LMax norm of v. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T lMaxNorm(vec<3, T, Q> const& x); + + /// @} +}//namespace glm + +#include "norm.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/norm.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/norm.inl new file mode 100644 index 000000000000..4a9f796451fd --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/norm.inl @@ -0,0 +1,95 @@ +/// @ref gtx_norm + +#include "../detail/qualifier.hpp" + +namespace glm{ +namespace detail +{ + template + struct compute_length2 + { + GLM_FUNC_QUALIFIER static T call(vec const& v) + { + return dot(v, v); + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER genType length2(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'length2' accepts only floating-point inputs"); + return x * x; + } + + template + GLM_FUNC_QUALIFIER T length2(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'length2' accepts only floating-point inputs"); + return detail::compute_length2::value>::call(v); + } + + template + GLM_FUNC_QUALIFIER T distance2(T p0, T p1) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'distance2' accepts only floating-point inputs"); + return length2(p1 - p0); + } + + template + GLM_FUNC_QUALIFIER T distance2(vec const& p0, vec const& p1) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'distance2' accepts only floating-point inputs"); + return length2(p1 - p0); + } + + template + GLM_FUNC_QUALIFIER T l1Norm(vec<3, T, Q> const& a, vec<3, T, Q> const& b) + { + return abs(b.x - a.x) + abs(b.y - a.y) + abs(b.z - a.z); + } + + template + GLM_FUNC_QUALIFIER T l1Norm(vec<3, T, Q> const& v) + { + return abs(v.x) + abs(v.y) + abs(v.z); + } + + template + GLM_FUNC_QUALIFIER T l2Norm(vec<3, T, Q> const& a, vec<3, T, Q> const& b + ) + { + return length(b - a); + } + + template + GLM_FUNC_QUALIFIER T l2Norm(vec<3, T, Q> const& v) + { + return length(v); + } + + template + GLM_FUNC_QUALIFIER T lxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y, unsigned int Depth) + { + return pow(pow(abs(y.x - x.x), T(Depth)) + pow(abs(y.y - x.y), T(Depth)) + pow(abs(y.z - x.z), T(Depth)), T(1) / T(Depth)); + } + + template + GLM_FUNC_QUALIFIER T lxNorm(vec<3, T, Q> const& v, unsigned int Depth) + { + return pow(pow(abs(v.x), T(Depth)) + pow(abs(v.y), T(Depth)) + pow(abs(v.z), T(Depth)), T(1) / T(Depth)); + } + + template + GLM_FUNC_QUALIFIER T lMaxNorm(vec<3, T, Q> const& a, vec<3, T, Q> const& b) + { + return compMax(abs(b - a)); + } + + template + GLM_FUNC_QUALIFIER T lMaxNorm(vec<3, T, Q> const& v) + { + return compMax(abs(v)); + } + +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/normal.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/normal.hpp new file mode 100644 index 000000000000..068682f75f2d --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/normal.hpp @@ -0,0 +1,41 @@ +/// @ref gtx_normal +/// @file glm/gtx/normal.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_normal GLM_GTX_normal +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Compute the normal of a triangle. 
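+///
+/// For example (a sketch; the winding follows the p1, p2, p3 order given), a
+/// counter-clockwise triangle in the xy-plane yields a normal along +z:
+/// ```
+/// glm::vec3 n = glm::triangleNormal(
+///     glm::vec3(0.0f), glm::vec3(1.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); // n == (0, 0, 1)
+/// ```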
+ +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_normal is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_normal extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_normal + /// @{ + + /// Computes triangle normal from triangle points. + /// + /// @see gtx_normal + template + GLM_FUNC_DECL vec<3, T, Q> triangleNormal(vec<3, T, Q> const& p1, vec<3, T, Q> const& p2, vec<3, T, Q> const& p3); + + /// @} +}//namespace glm + +#include "normal.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/normal.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/normal.inl new file mode 100644 index 000000000000..74f9fc994585 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/normal.inl @@ -0,0 +1,15 @@ +/// @ref gtx_normal + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> triangleNormal + ( + vec<3, T, Q> const& p1, + vec<3, T, Q> const& p2, + vec<3, T, Q> const& p3 + ) + { + return normalize(cross(p1 - p2, p1 - p3)); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/normalize_dot.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/normalize_dot.hpp new file mode 100644 index 000000000000..127aa1f65a85 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/normalize_dot.hpp @@ -0,0 +1,49 @@ +/// @ref gtx_normalize_dot +/// @file glm/gtx/normalize_dot.hpp +/// +/// @see core (dependence) +/// @see gtx_fast_square_root (dependence) +/// +/// @defgroup gtx_normalize_dot GLM_GTX_normalize_dot +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Dot product of vectors that need to be normalize with a single square root. + +#pragma once + +// Dependency: +#include "../gtx/fast_square_root.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_normalize_dot is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_normalize_dot extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_normalize_dot + /// @{ + + /// Normalize parameters and returns the dot product of x and y. + /// It's faster that dot(normalize(x), normalize(y)). + /// + /// @see gtx_normalize_dot extension. + template + GLM_FUNC_DECL T normalizeDot(vec const& x, vec const& y); + + /// Normalize parameters and returns the dot product of x and y. + /// Faster that dot(fastNormalize(x), fastNormalize(y)). + /// + /// @see gtx_normalize_dot extension. 
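+	///
+	/// Equivalent computation, as a sketch (values are illustrative):
+	/// ```
+	/// glm::vec3 x(2.0f, 0.0f, 0.0f), y(1.0f, 1.0f, 0.0f);
+	/// float c = glm::normalizeDot(x, y); // dot(x, y) * inversesqrt(dot(x, x) * dot(y, y)), about 0.707
+	/// // i.e. the cosine of the angle between x and y, using one inversesqrt instead of two normalizes
+	/// ```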
+ template + GLM_FUNC_DECL T fastNormalizeDot(vec const& x, vec const& y); + + /// @} +}//namespace glm + +#include "normalize_dot.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/normalize_dot.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/normalize_dot.inl new file mode 100644 index 000000000000..7bcd9a534a8f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/normalize_dot.inl @@ -0,0 +1,16 @@ +/// @ref gtx_normalize_dot + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T normalizeDot(vec const& x, vec const& y) + { + return glm::dot(x, y) * glm::inversesqrt(glm::dot(x, x) * glm::dot(y, y)); + } + + template + GLM_FUNC_QUALIFIER T fastNormalizeDot(vec const& x, vec const& y) + { + return glm::dot(x, y) * glm::fastInverseSqrt(glm::dot(x, x) * glm::dot(y, y)); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/number_precision.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/number_precision.hpp new file mode 100644 index 000000000000..3bf0ad68bdd0 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/number_precision.hpp @@ -0,0 +1,47 @@ +/// @ref gtx_number_precision +/// @file glm/gtx/number_precision.hpp +/// +/// @see core (dependence) +/// @see gtc_type_precision (dependence) +/// @see gtc_quaternion (dependence) +/// +/// @defgroup gtx_number_precision GLM_GTX_number_precision +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Defined size types. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/type_precision.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_number_precision is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_number_precision extension included") +# endif +#endif + +namespace glm{ + ///////////////////////////// + // Unsigned int vector types + + /// @addtogroup gtx_number_precision + /// @{ + + ////////////////////// + // Float matrix types + + typedef f32 f32mat1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) + typedef f32 f32mat1x1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) + typedef f64 f64mat1; //!< \brief Double-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) + typedef f64 f64mat1x1; //!< \brief Double-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) + + /// @} +}//namespace glm + +#include "number_precision.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/optimum_pow.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/optimum_pow.hpp new file mode 100644 index 000000000000..a8ff6002f123 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/optimum_pow.hpp @@ -0,0 +1,52 @@ +/// @ref gtx_optimum_pow +/// @file glm/gtx/optimum_pow.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_optimum_pow GLM_GTX_optimum_pow +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Integer exponentiation of power functions. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_optimum_pow is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_optimum_pow extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_optimum_pow + /// @{ + + /// Returns x raised to the power of 2. + /// + /// @see gtx_optimum_pow + template + GLM_FUNC_DECL genType pow2(genType const& x); + + /// Returns x raised to the power of 3. + /// + /// @see gtx_optimum_pow + template + GLM_FUNC_DECL genType pow3(genType const& x); + + /// Returns x raised to the power of 4. + /// + /// @see gtx_optimum_pow + template + GLM_FUNC_DECL genType pow4(genType const& x); + + /// @} +}//namespace glm + +#include "optimum_pow.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/optimum_pow.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/optimum_pow.inl new file mode 100644 index 000000000000..a26c19c18bfb --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/optimum_pow.inl @@ -0,0 +1,22 @@ +/// @ref gtx_optimum_pow + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType pow2(genType const& x) + { + return x * x; + } + + template + GLM_FUNC_QUALIFIER genType pow3(genType const& x) + { + return x * x * x; + } + + template + GLM_FUNC_QUALIFIER genType pow4(genType const& x) + { + return (x * x) * (x * x); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/orthonormalize.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/orthonormalize.hpp new file mode 100644 index 000000000000..3e004fb06f9c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/orthonormalize.hpp @@ -0,0 +1,49 @@ +/// @ref gtx_orthonormalize +/// @file glm/gtx/orthonormalize.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_orthonormalize GLM_GTX_orthonormalize +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Orthonormalize matrices. + +#pragma once + +// Dependency: +#include "../vec3.hpp" +#include "../mat3x3.hpp" +#include "../geometric.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_orthonormalize is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_orthonormalize extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_orthonormalize + /// @{ + + /// Returns the orthonormalized matrix of m. + /// + /// @see gtx_orthonormalize + template + GLM_FUNC_DECL mat<3, 3, T, Q> orthonormalize(mat<3, 3, T, Q> const& m); + + /// Orthonormalizes x according y. 
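+	/// In other words, it removes from x its component along y and renormalizes; a sketch,
+	/// assuming y is already unit length:
+	/// ```
+	/// glm::vec3 y(0.0f, 1.0f, 0.0f);
+	/// glm::vec3 x = glm::orthonormalize(glm::vec3(1.0f, 1.0f, 0.0f), y); // x becomes (1, 0, 0)
+	/// ```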
+ /// + /// @see gtx_orthonormalize + template + GLM_FUNC_DECL vec<3, T, Q> orthonormalize(vec<3, T, Q> const& x, vec<3, T, Q> const& y); + + /// @} +}//namespace glm + +#include "orthonormalize.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/orthonormalize.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/orthonormalize.inl new file mode 100644 index 000000000000..cb553ba62157 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/orthonormalize.inl @@ -0,0 +1,29 @@ +/// @ref gtx_orthonormalize + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> orthonormalize(mat<3, 3, T, Q> const& m) + { + mat<3, 3, T, Q> r = m; + + r[0] = normalize(r[0]); + + T d0 = dot(r[0], r[1]); + r[1] -= r[0] * d0; + r[1] = normalize(r[1]); + + T d1 = dot(r[1], r[2]); + d0 = dot(r[0], r[2]); + r[2] -= r[0] * d0 + r[1] * d1; + r[2] = normalize(r[2]); + + return r; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> orthonormalize(vec<3, T, Q> const& x, vec<3, T, Q> const& y) + { + return normalize(x - y * dot(y, x)); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/pca.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/pca.hpp new file mode 100644 index 000000000000..d89c408e36ef --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/pca.hpp @@ -0,0 +1,115 @@ +/// @ref gtx_pca +/// @file glm/gtx/pca.hpp +/// +/// @see core (dependence) +/// @see ext_scalar_relational (dependence) +/// +/// @defgroup gtx_pca GLM_GTX_pca +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Implements functions required for fundamental 'princple component analysis' in 2D, 3D, and 4D: +/// 1) Computing a covariance matrics from a list of _relative_ position vectors +/// 2) Compute the eigenvalues and eigenvectors of the covariance matrics +/// This is useful, e.g., to compute an object-aligned bounding box from vertices of an object. +/// https://en.wikipedia.org/wiki/Principal_component_analysis +/// +/// Example: +/// ``` +/// std::vector ptData; +/// // ... fill ptData with some point data, e.g. vertices +/// +/// glm::dvec3 center = computeCenter(ptData); +/// +/// glm::dmat3 covarMat = glm::computeCovarianceMatrix(ptData.data(), ptData.size(), center); +/// +/// glm::dvec3 evals; +/// glm::dmat3 evecs; +/// int evcnt = glm::findEigenvaluesSymReal(covarMat, evals, evecs); +/// +/// if(evcnt != 3) +/// // ... error handling +/// +/// glm::sortEigenvalues(evals, evecs); +/// +/// // ... now evecs[0] points in the direction (symmetric) of the largest spatial distribution within ptData +/// ``` + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../ext/scalar_relational.hpp" + + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_pca is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_pca extension included") +# endif +#endif + +namespace glm { + /// @addtogroup gtx_pca + /// @{ + + /// Compute a covariance matrix form an array of relative coordinates `v` (e.g., relative to the center of gravity of the object) + /// @param v Points to a memory holding `n` times vectors + /// @param n Number of points in v + template + GLM_INLINE mat computeCovarianceMatrix(vec const* v, size_t n); + + /// Compute a covariance matrix form an array of absolute coordinates `v` and a precomputed center of gravity `c` + /// @param v Points to a memory holding `n` times vectors + /// @param n Number of points in v + /// @param c Precomputed center of gravity + template + GLM_INLINE mat computeCovarianceMatrix(vec const* v, size_t n, vec const& c); + + /// Compute a covariance matrix form a pair of iterators `b` (begin) and `e` (end) of a container with relative coordinates (e.g., relative to the center of gravity of the object) + /// Dereferencing an iterator of type I must yield a `vec<D, T, Q%gt;` + template + GLM_FUNC_DECL mat computeCovarianceMatrix(I const& b, I const& e); + + /// Compute a covariance matrix form a pair of iterators `b` (begin) and `e` (end) of a container with absolute coordinates and a precomputed center of gravity `c` + /// Dereferencing an iterator of type I must yield a `vec<D, T, Q%gt;` + template + GLM_FUNC_DECL mat computeCovarianceMatrix(I const& b, I const& e, vec const& c); + + /// Assuming the provided covariance matrix `covarMat` is symmetric and real-valued, this function find the `D` Eigenvalues of the matrix, and also provides the corresponding Eigenvectors. + /// Note: the data in `outEigenvalues` and `outEigenvectors` are in matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`. + /// This is a numeric implementation to find the Eigenvalues, using 'QL decomposition` (variant of QR decomposition: https://en.wikipedia.org/wiki/QR_decomposition). + /// + /// @param[in] covarMat A symmetric, real-valued covariance matrix, e.g. computed from computeCovarianceMatrix + /// @param[out] outEigenvalues Vector to receive the found eigenvalues + /// @param[out] outEigenvectors Matrix to receive the found eigenvectors corresponding to the found eigenvalues, as column vectors + /// @return The number of eigenvalues found, usually D if the precondition of the covariance matrix is met. + template + GLM_FUNC_DECL unsigned int findEigenvaluesSymReal + ( + mat const& covarMat, + vec& outEigenvalues, + mat& outEigenvectors + ); + + /// Sorts a group of Eigenvalues&Eigenvectors, for largest Eigenvalue to smallest Eigenvalue. + /// The data in `outEigenvalues` and `outEigenvectors` are assumed to be matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`. + template + GLM_FUNC_DECL void sortEigenvalues(vec<2, T, Q>& eigenvalues, mat<2, 2, T, Q>& eigenvectors); + + /// Sorts a group of Eigenvalues&Eigenvectors, for largest Eigenvalue to smallest Eigenvalue. + /// The data in `outEigenvalues` and `outEigenvectors` are assumed to be matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`. + template + GLM_FUNC_DECL void sortEigenvalues(vec<3, T, Q>& eigenvalues, mat<3, 3, T, Q>& eigenvectors); + + /// Sorts a group of Eigenvalues&Eigenvectors, for largest Eigenvalue to smallest Eigenvalue. 
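+	/// A usage sketch, continuing the example above (evals and evecs as returned by findEigenvaluesSymReal):
+	/// ```
+	/// glm::sortEigenvalues(evals, evecs);
+	/// // evals[0] >= evals[1] >= evals[2]; each evecs[i] stays paired with evals[i]
+	/// ```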
+ /// The data in `outEigenvalues` and `outEigenvectors` are assumed to be matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`. + template + GLM_FUNC_DECL void sortEigenvalues(vec<4, T, Q>& eigenvalues, mat<4, 4, T, Q>& eigenvectors); + + /// @} +}//namespace glm + +#include "pca.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/pca.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/pca.inl new file mode 100644 index 000000000000..94cae946e8a2 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/pca.inl @@ -0,0 +1,343 @@ +/// @ref gtx_pca + +#ifndef GLM_HAS_CXX11_STL +#include +#else +#include +#endif + +namespace glm { + + + template + GLM_FUNC_QUALIFIER mat computeCovarianceMatrix(vec const* v, size_t n) + { + return computeCovarianceMatrix const*>(v, v + n); + } + + + template + GLM_FUNC_QUALIFIER mat computeCovarianceMatrix(vec const* v, size_t n, vec const& c) + { + return computeCovarianceMatrix const*>(v, v + n, c); + } + + + template + GLM_FUNC_QUALIFIER mat computeCovarianceMatrix(I const& b, I const& e) + { + glm::mat m(0); + + size_t cnt = 0; + for(I i = b; i != e; i++) + { + vec const& v = *i; + for(length_t x = 0; x < D; ++x) + for(length_t y = 0; y < D; ++y) + m[x][y] += static_cast(v[x] * v[y]); + cnt++; + } + if(cnt > 0) + m /= static_cast(cnt); + + return m; + } + + + template + GLM_FUNC_QUALIFIER mat computeCovarianceMatrix(I const& b, I const& e, vec const& c) + { + glm::mat m(0); + glm::vec v; + + size_t cnt = 0; + for(I i = b; i != e; i++) + { + v = *i - c; + for(length_t x = 0; x < D; ++x) + for(length_t y = 0; y < D; ++y) + m[x][y] += static_cast(v[x] * v[y]); + cnt++; + } + if(cnt > 0) + m /= static_cast(cnt); + + return m; + } + + namespace _internal_ + { + + template + GLM_FUNC_QUALIFIER static T transferSign(T const& v, T const& s) + { + return ((s) >= 0 ? glm::abs(v) : -glm::abs(v)); + } + + template + GLM_FUNC_QUALIFIER static T pythag(T const& a, T const& b) { + static const T epsilon = static_cast(0.0000001); + T absa = glm::abs(a); + T absb = glm::abs(b); + if(absa > absb) { + absb /= absa; + absb *= absb; + return absa * glm::sqrt(static_cast(1) + absb); + } + if(glm::equal(absb, 0, epsilon)) return static_cast(0); + absa /= absb; + absa *= absa; + return absb * glm::sqrt(static_cast(1) + absa); + } + + } + + template + GLM_FUNC_QUALIFIER unsigned int findEigenvaluesSymReal + ( + mat const& covarMat, + vec& outEigenvalues, + mat& outEigenvectors + ) + { + using _internal_::transferSign; + using _internal_::pythag; + + T a[D * D]; // matrix -- input and workspace for algorithm (will be changed inplace) + T d[D]; // diagonal elements + T e[D]; // off-diagonal elements + + for(length_t r = 0; r < D; r++) + for(length_t c = 0; c < D; c++) + a[(r) * D + (c)] = covarMat[c][r]; + + // 1. Householder reduction. + length_t l, k, j, i; + T scale, hh, h, g, f; + static const T epsilon = static_cast(0.0000001); + + for(i = D; i >= 2; i--) + { + l = i - 1; + h = scale = 0; + if(l > 1) + { + for(k = 1; k <= l; k++) + { + scale += glm::abs(a[(i - 1) * D + (k - 1)]); + } + if(glm::equal(scale, 0, epsilon)) + { + e[i - 1] = a[(i - 1) * D + (l - 1)]; + } + else + { + for(k = 1; k <= l; k++) + { + a[(i - 1) * D + (k - 1)] /= scale; + h += a[(i - 1) * D + (k - 1)] * a[(i - 1) * D + (k - 1)]; + } + f = a[(i - 1) * D + (l - 1)]; + g = ((f >= 0) ? 
-glm::sqrt(h) : glm::sqrt(h)); + e[i - 1] = scale * g; + h -= f * g; + a[(i - 1) * D + (l - 1)] = f - g; + f = 0; + for(j = 1; j <= l; j++) + { + a[(j - 1) * D + (i - 1)] = a[(i - 1) * D + (j - 1)] / h; + g = 0; + for(k = 1; k <= j; k++) + { + g += a[(j - 1) * D + (k - 1)] * a[(i - 1) * D + (k - 1)]; + } + for(k = j + 1; k <= l; k++) + { + g += a[(k - 1) * D + (j - 1)] * a[(i - 1) * D + (k - 1)]; + } + e[j - 1] = g / h; + f += e[j - 1] * a[(i - 1) * D + (j - 1)]; + } + hh = f / (h + h); + for(j = 1; j <= l; j++) + { + f = a[(i - 1) * D + (j - 1)]; + e[j - 1] = g = e[j - 1] - hh * f; + for(k = 1; k <= j; k++) + { + a[(j - 1) * D + (k - 1)] -= (f * e[k - 1] + g * a[(i - 1) * D + (k - 1)]); + } + } + } + } + else + { + e[i - 1] = a[(i - 1) * D + (l - 1)]; + } + d[i - 1] = h; + } + d[0] = 0; + e[0] = 0; + for(i = 1; i <= D; i++) + { + l = i - 1; + if(!glm::equal(d[i - 1], 0, epsilon)) + { + for(j = 1; j <= l; j++) + { + g = 0; + for(k = 1; k <= l; k++) + { + g += a[(i - 1) * D + (k - 1)] * a[(k - 1) * D + (j - 1)]; + } + for(k = 1; k <= l; k++) + { + a[(k - 1) * D + (j - 1)] -= g * a[(k - 1) * D + (i - 1)]; + } + } + } + d[i - 1] = a[(i - 1) * D + (i - 1)]; + a[(i - 1) * D + (i - 1)] = 1; + for(j = 1; j <= l; j++) + { + a[(j - 1) * D + (i - 1)] = a[(i - 1) * D + (j - 1)] = 0; + } + } + + // 2. Calculation of eigenvalues and eigenvectors (QL algorithm) + length_t m, iter; + T s, r, p, dd, c, b; + const length_t MAX_ITER = 30; + + for(i = 2; i <= D; i++) + { + e[i - 2] = e[i - 1]; + } + e[D - 1] = 0; + + for(l = 1; l <= D; l++) + { + iter = 0; + do + { + for(m = l; m <= D - 1; m++) + { + dd = glm::abs(d[m - 1]) + glm::abs(d[m - 1 + 1]); + if(glm::equal(glm::abs(e[m - 1]) + dd, dd, epsilon)) + break; + } + if(m != l) + { + if(iter++ == MAX_ITER) + { + return 0; // Too many iterations in FindEigenvalues + } + g = (d[l - 1 + 1] - d[l - 1]) / (2 * e[l - 1]); + r = pythag(g, 1); + g = d[m - 1] - d[l - 1] + e[l - 1] / (g + transferSign(r, g)); + s = c = 1; + p = 0; + for(i = m - 1; i >= l; i--) + { + f = s * e[i - 1]; + b = c * e[i - 1]; + e[i - 1 + 1] = r = pythag(f, g); + if(glm::equal(r, 0, epsilon)) + { + d[i - 1 + 1] -= p; + e[m - 1] = 0; + break; + } + s = f / r; + c = g / r; + g = d[i - 1 + 1] - p; + r = (d[i - 1] - g) * s + 2 * c * b; + d[i - 1 + 1] = g + (p = s * r); + g = c * r - b; + for(k = 1; k <= D; k++) + { + f = a[(k - 1) * D + (i - 1 + 1)]; + a[(k - 1) * D + (i - 1 + 1)] = s * a[(k - 1) * D + (i - 1)] + c * f; + a[(k - 1) * D + (i - 1)] = c * a[(k - 1) * D + (i - 1)] - s * f; + } + } + if(glm::equal(r, 0, epsilon) && (i >= l)) + continue; + d[l - 1] -= p; + e[l - 1] = g; + e[m - 1] = 0; + } + } while(m != l); + } + + // 3. 
output + for(i = 0; i < D; i++) + outEigenvalues[i] = d[i]; + for(i = 0; i < D; i++) + for(j = 0; j < D; j++) + outEigenvectors[i][j] = a[(j) * D + (i)]; + + return D; + } + + template + GLM_FUNC_QUALIFIER void sortEigenvalues(vec<2, T, Q>& eigenvalues, mat<2, 2, T, Q>& eigenvectors) + { + if (eigenvalues[0] < eigenvalues[1]) + { + std::swap(eigenvalues[0], eigenvalues[1]); + std::swap(eigenvectors[0], eigenvectors[1]); + } + } + + template + GLM_FUNC_QUALIFIER void sortEigenvalues(vec<3, T, Q>& eigenvalues, mat<3, 3, T, Q>& eigenvectors) + { + if (eigenvalues[0] < eigenvalues[1]) + { + std::swap(eigenvalues[0], eigenvalues[1]); + std::swap(eigenvectors[0], eigenvectors[1]); + } + if (eigenvalues[0] < eigenvalues[2]) + { + std::swap(eigenvalues[0], eigenvalues[2]); + std::swap(eigenvectors[0], eigenvectors[2]); + } + if (eigenvalues[1] < eigenvalues[2]) + { + std::swap(eigenvalues[1], eigenvalues[2]); + std::swap(eigenvectors[1], eigenvectors[2]); + } + } + + template + GLM_FUNC_QUALIFIER void sortEigenvalues(vec<4, T, Q>& eigenvalues, mat<4, 4, T, Q>& eigenvectors) + { + if (eigenvalues[0] < eigenvalues[2]) + { + std::swap(eigenvalues[0], eigenvalues[2]); + std::swap(eigenvectors[0], eigenvectors[2]); + } + if (eigenvalues[1] < eigenvalues[3]) + { + std::swap(eigenvalues[1], eigenvalues[3]); + std::swap(eigenvectors[1], eigenvectors[3]); + } + if (eigenvalues[0] < eigenvalues[1]) + { + std::swap(eigenvalues[0], eigenvalues[1]); + std::swap(eigenvectors[0], eigenvectors[1]); + } + if (eigenvalues[2] < eigenvalues[3]) + { + std::swap(eigenvalues[2], eigenvalues[3]); + std::swap(eigenvectors[2], eigenvectors[3]); + } + if (eigenvalues[1] < eigenvalues[2]) + { + std::swap(eigenvalues[1], eigenvalues[2]); + std::swap(eigenvectors[1], eigenvectors[2]); + } + } + +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/perpendicular.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/perpendicular.hpp new file mode 100644 index 000000000000..72b77b6e2388 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/perpendicular.hpp @@ -0,0 +1,41 @@ +/// @ref gtx_perpendicular +/// @file glm/gtx/perpendicular.hpp +/// +/// @see core (dependence) +/// @see gtx_projection (dependence) +/// +/// @defgroup gtx_perpendicular GLM_GTX_perpendicular +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Perpendicular of a vector from other one + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtx/projection.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_perpendicular is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_perpendicular extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_perpendicular + /// @{ + + //! Projects x a perpendicular axis of Normal. + //! From GLM_GTX_perpendicular extension. 
+	template<typename genType>
+	GLM_FUNC_DECL genType perp(genType const& x, genType const& Normal);
+
+	/// @}
+}//namespace glm
+
+#include "perpendicular.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/perpendicular.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/perpendicular.inl
new file mode 100644
index 000000000000..1e72f334230d
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/perpendicular.inl
@@ -0,0 +1,10 @@
+/// @ref gtx_perpendicular
+
+namespace glm
+{
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType perp(genType const& x, genType const& Normal)
+	{
+		return x - proj(x, Normal);
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/polar_coordinates.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/polar_coordinates.hpp
new file mode 100644
index 000000000000..76beb82bd57c
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/polar_coordinates.hpp
@@ -0,0 +1,48 @@
+/// @ref gtx_polar_coordinates
+/// @file glm/gtx/polar_coordinates.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_polar_coordinates GLM_GTX_polar_coordinates
+/// @ingroup gtx
+///
+/// Include <glm/gtx/polar_coordinates.hpp> to use the features of this extension.
+///
+/// Conversion from Euclidean space to polar space and back.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_polar_coordinates is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_polar_coordinates extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_polar_coordinates
+	/// @{
+
+	/// Convert Euclidean to Polar coordinates, x is the latitude, y the longitude and z the xz distance.
+	///
+	/// @see gtx_polar_coordinates
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> polar(
+		vec<3, T, Q> const& euclidean);
+
+	/// Convert Polar to Euclidean coordinates.
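The perpendicular.inl body above is just the orthogonal-decomposition identity x = proj(x, N) + perp(x, N). A small sanity sketch with assumed values:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/perpendicular.hpp> // also pulls in gtx/projection.hpp

    void decompose()
    {
        glm::vec3 const x(1.0f, 2.0f, 3.0f);
        glm::vec3 const n(0.0f, 1.0f, 0.0f); // need not be unit length

        glm::vec3 parallel   = glm::proj(x, n); // (0, 2, 0)
        glm::vec3 orthogonal = glm::perp(x, n); // (1, 0, 3)
        // parallel + orthogonal reconstructs x, and dot(orthogonal, n) == 0.
    }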
+	///
+	/// @see gtx_polar_coordinates
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL vec<3, T, Q> euclidean(
+		vec<2, T, Q> const& polar);
+
+	/// @}
+}//namespace glm
+
+#include "polar_coordinates.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/polar_coordinates.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/polar_coordinates.inl
new file mode 100644
index 000000000000..371c8dddebd1
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/polar_coordinates.inl
@@ -0,0 +1,36 @@
+/// @ref gtx_polar_coordinates
+
+namespace glm
+{
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, T, Q> polar
+	(
+		vec<3, T, Q> const& euclidean
+	)
+	{
+		T const Length(length(euclidean));
+		vec<3, T, Q> const tmp(euclidean / Length);
+		T const xz_dist(sqrt(tmp.x * tmp.x + tmp.z * tmp.z));
+
+		return vec<3, T, Q>(
+			asin(tmp.y),        // latitude
+			atan(tmp.x, tmp.z), // longitude
+			xz_dist);           // xz distance
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, T, Q> euclidean
+	(
+		vec<2, T, Q> const& polar
+	)
+	{
+		T const latitude(polar.x);
+		T const longitude(polar.y);
+
+		return vec<3, T, Q>(
+			cos(latitude) * sin(longitude),
+			sin(latitude),
+			cos(latitude) * cos(longitude));
+	}
+
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/projection.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/projection.hpp
new file mode 100644
index 000000000000..678f3ad5a585
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/projection.hpp
@@ -0,0 +1,43 @@
+/// @ref gtx_projection
+/// @file glm/gtx/projection.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_projection GLM_GTX_projection
+/// @ingroup gtx
+///
+/// Include <glm/gtx/projection.hpp> to use the features of this extension.
+///
+/// Projection of a vector onto another one
+
+#pragma once
+
+// Dependency:
+#include "../geometric.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_projection is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_projection extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_projection
+	/// @{
+
+	/// Projects x on Normal.
+	///
+	/// @param[in] x A vector to project
+	/// @param[in] Normal A normal that doesn't need to be of unit length.
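A round-trip sketch for polar_coordinates.inl above. Note the asymmetry: polar() returns (latitude, longitude, xz distance), while euclidean() consumes only (latitude, longitude) and yields a unit vector, so the radius must be carried separately:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/polar_coordinates.hpp>

    void roundTrip()
    {
        glm::vec3 const p(2.0f, 0.0f, 0.0f);
        glm::vec3 const sph = glm::polar(p);                  // (latitude, longitude, xz distance)
        glm::vec3 const dir = glm::euclidean(glm::vec2(sph)); // unit direction only
        // dir * glm::length(p) reconstructs p; euclidean() drops the radius.
    }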
+ /// + /// @see gtx_projection + template + GLM_FUNC_DECL genType proj(genType const& x, genType const& Normal); + + /// @} +}//namespace glm + +#include "projection.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/projection.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/projection.inl new file mode 100644 index 000000000000..f23f884fb93a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/projection.inl @@ -0,0 +1,10 @@ +/// @ref gtx_projection + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType proj(genType const& x, genType const& Normal) + { + return glm::dot(x, Normal) / glm::dot(Normal, Normal) * Normal; + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/quaternion.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/quaternion.hpp new file mode 100644 index 000000000000..35c372b816e9 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/quaternion.hpp @@ -0,0 +1,174 @@ +/// @ref gtx_quaternion +/// @file glm/gtx/quaternion.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_quaternion GLM_GTX_quaternion +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Extended quaternion types and functions + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/constants.hpp" +#include "../gtc/quaternion.hpp" +#include "../ext/quaternion_exponential.hpp" +#include "../gtx/norm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_quaternion extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_quaternion + /// @{ + + /// Create an identity quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL GLM_CONSTEXPR qua quat_identity(); + + /// Compute a cross product between a quaternion and a vector. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> cross( + qua const& q, + vec<3, T, Q> const& v); + + //! Compute a cross product between a vector and a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> cross( + vec<3, T, Q> const& v, + qua const& q); + + //! Compute a point on a path according squad equation. + //! q1 and q2 are control points; s1 and s2 are intermediate control points. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL qua squad( + qua const& q1, + qua const& q2, + qua const& s1, + qua const& s2, + T const& h); + + //! Returns an intermediate control point for squad interpolation. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL qua intermediate( + qua const& prev, + qua const& curr, + qua const& next); + + //! Returns quarternion square root. + /// + /// @see gtx_quaternion + //template + //qua sqrt( + // qua const& q); + + //! Rotates a 3 components vector by a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL vec<3, T, Q> rotate( + qua const& q, + vec<3, T, Q> const& v); + + /// Rotates a 4 components vector by a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL vec<4, T, Q> rotate( + qua const& q, + vec<4, T, Q> const& v); + + /// Extract the real component of a quaternion. 
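A short sketch of the rotation helpers declared above; rotate(q, v) and cross(q, v) both reduce to q * v in the quaternion.inl that follows later in this patch:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/quaternion.hpp>

    void spin()
    {
        // 90 degrees about +Y, built from axis-angle.
        glm::quat q = glm::angleAxis(glm::half_pi<float>(), glm::vec3(0.0f, 1.0f, 0.0f));
        glm::vec3 v(1.0f, 0.0f, 0.0f);

        glm::vec3 r1 = glm::rotate(q, v); // q * v
        glm::vec3 r2 = glm::cross(q, v);  // also q * v
        // r1 == r2 == approximately (0, 0, -1)
    }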
+ /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL T extractRealComponent( + qua const& q); + + /// Converts a quaternion to a 3 * 3 matrix. + /// + /// @see gtx_quaternion + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> toMat3( + qua const& x){return mat3_cast(x);} + + /// Converts a quaternion to a 4 * 4 matrix. + /// + /// @see gtx_quaternion + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> toMat4( + qua const& x){return mat4_cast(x);} + + /// Converts a 3 * 3 matrix to a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_QUALIFIER qua toQuat( + mat<3, 3, T, Q> const& x){return quat_cast(x);} + + /// Converts a 4 * 4 matrix to a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_QUALIFIER qua toQuat( + mat<4, 4, T, Q> const& x){return quat_cast(x);} + + /// Quaternion interpolation using the rotation short path. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL qua shortMix( + qua const& x, + qua const& y, + T const& a); + + /// Quaternion normalized linear interpolation. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL qua fastMix( + qua const& x, + qua const& y, + T const& a); + + /// Compute the rotation between two vectors. + /// @param orig vector, needs to be normalized + /// @param dest vector, needs to be normalized + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL qua rotation( + vec<3, T, Q> const& orig, + vec<3, T, Q> const& dest); + + /// Returns the squared length of x. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL GLM_CONSTEXPR T length2(qua const& q); + + /// @} +}//namespace glm + +#include "quaternion.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/quaternion.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/quaternion.inl new file mode 100644 index 000000000000..5e18899a714a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/quaternion.inl @@ -0,0 +1,159 @@ +/// @ref gtx_quaternion + +#include +#include "../gtc/constants.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua quat_identity() + { + return qua::wxyz(static_cast(1), static_cast(0), static_cast(0), static_cast(0)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> cross(vec<3, T, Q> const& v, qua const& q) + { + return inverse(q) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> cross(qua const& q, vec<3, T, Q> const& v) + { + return q * v; + } + + template + GLM_FUNC_QUALIFIER qua squad + ( + qua const& q1, + qua const& q2, + qua const& s1, + qua const& s2, + T const& h) + { + return mix(mix(q1, q2, h), mix(s1, s2, h), static_cast(2) * (static_cast(1) - h) * h); + } + + template + GLM_FUNC_QUALIFIER qua intermediate + ( + qua const& prev, + qua const& curr, + qua const& next + ) + { + qua invQuat = inverse(curr); + return exp((log(next * invQuat) + log(prev * invQuat)) / static_cast(-4)) * curr; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotate(qua const& q, vec<3, T, Q> const& v) + { + return q * v; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> rotate(qua const& q, vec<4, T, Q> const& v) + { + return q * v; + } + + template + GLM_FUNC_QUALIFIER T extractRealComponent(qua const& q) + { + T w = static_cast(1) - q.x * q.x - q.y * q.y - q.z * q.z; + if(w < T(0)) + return T(0); + else + return -sqrt(w); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T length2(qua const& q) + { + return q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w; + } + + template + GLM_FUNC_QUALIFIER qua shortMix(qua const& x, qua const& y, T const& a) + { + if(a <= 
static_cast(0)) return x; + if(a >= static_cast(1)) return y; + + T fCos = dot(x, y); + qua y2(y); //BUG!!! qua y2; + if(fCos < static_cast(0)) + { + y2 = -y; + fCos = -fCos; + } + + //if(fCos > 1.0f) // problem + T k0, k1; + if(fCos > (static_cast(1) - epsilon())) + { + k0 = static_cast(1) - a; + k1 = static_cast(0) + a; //BUG!!! 1.0f + a; + } + else + { + T fSin = sqrt(T(1) - fCos * fCos); + T fAngle = atan(fSin, fCos); + T fOneOverSin = static_cast(1) / fSin; + k0 = sin((static_cast(1) - a) * fAngle) * fOneOverSin; + k1 = sin((static_cast(0) + a) * fAngle) * fOneOverSin; + } + + return qua::wxyz( + k0 * x.w + k1 * y2.w, + k0 * x.x + k1 * y2.x, + k0 * x.y + k1 * y2.y, + k0 * x.z + k1 * y2.z); + } + + template + GLM_FUNC_QUALIFIER qua fastMix(qua const& x, qua const& y, T const& a) + { + return glm::normalize(x * (static_cast(1) - a) + (y * a)); + } + + template + GLM_FUNC_QUALIFIER qua rotation(vec<3, T, Q> const& orig, vec<3, T, Q> const& dest) + { + T cosTheta = dot(orig, dest); + vec<3, T, Q> rotationAxis; + + if(cosTheta >= static_cast(1) - epsilon()) { + // orig and dest point in the same direction + return quat_identity(); + } + + if(cosTheta < static_cast(-1) + epsilon()) + { + // special case when vectors in opposite directions : + // there is no "ideal" rotation axis + // So guess one; any will do as long as it's perpendicular to start + // This implementation favors a rotation around the Up axis (Y), + // since it's often what you want to do. + rotationAxis = cross(vec<3, T, Q>(0, 0, 1), orig); + if(length2(rotationAxis) < epsilon()) // bad luck, they were parallel, try again! + rotationAxis = cross(vec<3, T, Q>(1, 0, 0), orig); + + rotationAxis = normalize(rotationAxis); + return angleAxis(pi(), rotationAxis); + } + + // Implementation from Stan Melax's Game Programming Gems 1 article + rotationAxis = cross(orig, dest); + + T s = sqrt((T(1) + cosTheta) * static_cast(2)); + T invs = static_cast(1) / s; + + return qua::wxyz( + s * static_cast(0.5f), + rotationAxis.x * invs, + rotationAxis.y * invs, + rotationAxis.z * invs); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/range.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/range.hpp new file mode 100644 index 000000000000..93bcb9a65a0a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/range.hpp @@ -0,0 +1,98 @@ +/// @ref gtx_range +/// @file glm/gtx/range.hpp +/// @author Joshua Moerman +/// +/// @defgroup gtx_range GLM_GTX_range +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Defines begin and end for vectors and matrices. Useful for range-based for loop. +/// The range is defined over the elements, not over columns or rows (e.g. mat4 has 16 elements). + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_range is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_range extension included") +# endif +#endif + +#include "../gtc/type_ptr.hpp" +#include "../gtc/vec1.hpp" + +namespace glm +{ + /// @addtogroup gtx_range + /// @{ + +# if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable : 4100) // unreferenced formal parameter +# endif + + template + inline length_t components(vec<1, T, Q> const& v) + { + return v.length(); + } + + template + inline length_t components(vec<2, T, Q> const& v) + { + return v.length(); + } + + template + inline length_t components(vec<3, T, Q> const& v) + { + return v.length(); + } + + template + inline length_t components(vec<4, T, Q> const& v) + { + return v.length(); + } + + template + inline length_t components(genType const& m) + { + return m.length() * m[0].length(); + } + + template + inline typename genType::value_type const * begin(genType const& v) + { + return value_ptr(v); + } + + template + inline typename genType::value_type const * end(genType const& v) + { + return begin(v) + components(v); + } + + template + inline typename genType::value_type * begin(genType& v) + { + return value_ptr(v); + } + + template + inline typename genType::value_type * end(genType& v) + { + return begin(v) + components(v); + } + +# if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/raw_data.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/raw_data.hpp new file mode 100644 index 000000000000..86cbe77d9ae5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/raw_data.hpp @@ -0,0 +1,51 @@ +/// @ref gtx_raw_data +/// @file glm/gtx/raw_data.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_raw_data GLM_GTX_raw_data +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Projection of a vector to other one + +#pragma once + +// Dependencies +#include "../ext/scalar_uint_sized.hpp" +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_raw_data is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_raw_data extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_raw_data + /// @{ + + //! Type for byte numbers. + //! From GLM_GTX_raw_data extension. + typedef detail::uint8 byte; + + //! Type for word numbers. + //! From GLM_GTX_raw_data extension. + typedef detail::uint16 word; + + //! Type for dword numbers. + //! From GLM_GTX_raw_data extension. + typedef detail::uint32 dword; + + //! Type for qword numbers. + //! From GLM_GTX_raw_data extension. 
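The begin()/end() overloads in gtx_range above make GLM types usable with range-based for, visiting scalars in column-major order. A minimal sketch, assuming experimental extensions are enabled:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/range.hpp>

    float sumElements(glm::mat4 const& m)
    {
        float sum = 0.0f;
        for (float e : m) // begin()/end() found via ADL; visits all 16 elements
            sum += e;
        return sum;
    }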
+ typedef detail::uint64 qword; + + /// @} +}// namespace glm + +#include "raw_data.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/raw_data.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/raw_data.inl new file mode 100644 index 000000000000..c740317d334e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/raw_data.inl @@ -0,0 +1,2 @@ +/// @ref gtx_raw_data + diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_normalized_axis.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_normalized_axis.hpp new file mode 100644 index 000000000000..2103ca08f15e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_normalized_axis.hpp @@ -0,0 +1,68 @@ +/// @ref gtx_rotate_normalized_axis +/// @file glm/gtx/rotate_normalized_axis.hpp +/// +/// @see core (dependence) +/// @see gtc_matrix_transform +/// @see gtc_quaternion +/// +/// @defgroup gtx_rotate_normalized_axis GLM_GTX_rotate_normalized_axis +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Quaternions and matrices rotations around normalized axis. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/epsilon.hpp" +#include "../gtc/quaternion.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_rotate_normalized_axis is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_rotate_normalized_axis extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_rotate_normalized_axis + /// @{ + + /// Builds a rotation 4 * 4 matrix created from a normalized axis and an angle. + /// + /// @param m Input matrix multiplied by this rotation matrix. + /// @param angle Rotation angle expressed in radians. + /// @param axis Rotation axis, must be normalized. + /// @tparam T Value type used to build the matrix. Currently supported: half (not recommended), float or double. + /// + /// @see gtx_rotate_normalized_axis + /// @see - rotate(T angle, T x, T y, T z) + /// @see - rotate(mat<4, 4, T, Q> const& m, T angle, T x, T y, T z) + /// @see - rotate(T angle, vec<3, T, Q> const& v) + template + GLM_FUNC_DECL mat<4, 4, T, Q> rotateNormalizedAxis( + mat<4, 4, T, Q> const& m, + T const& angle, + vec<3, T, Q> const& axis); + + /// Rotates a quaternion from a vector of 3 components normalized axis and an angle. + /// + /// @param q Source orientation + /// @param angle Angle expressed in radians. + /// @param axis Normalized axis of the rotation, must be normalized. 
+ /// + /// @see gtx_rotate_normalized_axis + template + GLM_FUNC_DECL qua rotateNormalizedAxis( + qua const& q, + T const& angle, + vec<3, T, Q> const& axis); + + /// @} +}//namespace glm + +#include "rotate_normalized_axis.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_normalized_axis.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_normalized_axis.inl new file mode 100644 index 000000000000..352a56cb17ac --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_normalized_axis.inl @@ -0,0 +1,58 @@ +/// @ref gtx_rotate_normalized_axis + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotateNormalizedAxis + ( + mat<4, 4, T, Q> const& m, + T const& angle, + vec<3, T, Q> const& v + ) + { + T const a = angle; + T const c = cos(a); + T const s = sin(a); + + vec<3, T, Q> const axis(v); + + vec<3, T, Q> const temp((static_cast(1) - c) * axis); + + mat<4, 4, T, Q> Rotate; + Rotate[0][0] = c + temp[0] * axis[0]; + Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2]; + Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1]; + + Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2]; + Rotate[1][1] = c + temp[1] * axis[1]; + Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0]; + + Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1]; + Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0]; + Rotate[2][2] = c + temp[2] * axis[2]; + + mat<4, 4, T, Q> Result; + Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; + Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; + Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; + Result[3] = m[3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER qua rotateNormalizedAxis + ( + qua const& q, + T const& angle, + vec<3, T, Q> const& v + ) + { + vec<3, T, Q> const Tmp(v); + + T const AngleRad(angle); + T const Sin = sin(AngleRad * T(0.5)); + + return q * qua::wxyz(cos(AngleRad * static_cast(0.5)), Tmp.x * Sin, Tmp.y * Sin, Tmp.z * Sin); + //return gtc::quaternion::cross(q, tquat(cos(AngleRad * T(0.5)), Tmp.x * fSin, Tmp.y * fSin, Tmp.z * fSin)); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_vector.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_vector.hpp new file mode 100644 index 000000000000..dcd5b95a6e5b --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_vector.hpp @@ -0,0 +1,123 @@ +/// @ref gtx_rotate_vector +/// @file glm/gtx/rotate_vector.hpp +/// +/// @see core (dependence) +/// @see gtx_transform (dependence) +/// +/// @defgroup gtx_rotate_vector GLM_GTX_rotate_vector +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Function to directly rotate a vector + +#pragma once + +// Dependency: +#include "../gtx/transform.hpp" +#include "../gtc/epsilon.hpp" +#include "../ext/vector_relational.hpp" +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_rotate_vector is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_rotate_vector extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_rotate_vector + /// @{ + + /// Returns Spherical interpolation between two vectors + /// + /// @param x A first vector + /// @param y A second vector + /// @param a Interpolation factor. 
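As the rotate_normalized_axis docs above state, these overloads skip the normalization that plain glm::rotate performs internally, so the axis must already be unit length. A hedged sketch (the tilt helper is hypothetical):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/rotate_normalized_axis.hpp>

    glm::mat4 tilt(glm::mat4 const& model, float angleRad, glm::vec3 axis)
    {
        axis = glm::normalize(axis); // precondition: unit-length axis
        return glm::rotateNormalizedAxis(model, angleRad, axis);
    }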
The interpolation is defined beyond the range [0, 1]. + /// + /// @see gtx_rotate_vector + template + GLM_FUNC_DECL vec<3, T, Q> slerp( + vec<3, T, Q> const& x, + vec<3, T, Q> const& y, + T const& a); + + //! Rotate a two dimensional vector. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<2, T, Q> rotate( + vec<2, T, Q> const& v, + T const& angle); + + //! Rotate a three dimensional vector around an axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<3, T, Q> rotate( + vec<3, T, Q> const& v, + T const& angle, + vec<3, T, Q> const& normal); + + //! Rotate a four dimensional vector around an axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<4, T, Q> rotate( + vec<4, T, Q> const& v, + T const& angle, + vec<3, T, Q> const& normal); + + //! Rotate a three dimensional vector around the X axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<3, T, Q> rotateX( + vec<3, T, Q> const& v, + T const& angle); + + //! Rotate a three dimensional vector around the Y axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<3, T, Q> rotateY( + vec<3, T, Q> const& v, + T const& angle); + + //! Rotate a three dimensional vector around the Z axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<3, T, Q> rotateZ( + vec<3, T, Q> const& v, + T const& angle); + + //! Rotate a four dimensional vector around the X axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<4, T, Q> rotateX( + vec<4, T, Q> const& v, + T const& angle); + + //! Rotate a four dimensional vector around the Y axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<4, T, Q> rotateY( + vec<4, T, Q> const& v, + T const& angle); + + //! Rotate a four dimensional vector around the Z axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<4, T, Q> rotateZ( + vec<4, T, Q> const& v, + T const& angle); + + //! Build a rotation matrix from a normal and a up vector. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> orientation( + vec<3, T, Q> const& Normal, + vec<3, T, Q> const& Up); + + /// @} +}//namespace glm + +#include "rotate_vector.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_vector.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_vector.inl new file mode 100644 index 000000000000..f8136e765e05 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/rotate_vector.inl @@ -0,0 +1,187 @@ +/// @ref gtx_rotate_vector + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> slerp + ( + vec<3, T, Q> const& x, + vec<3, T, Q> const& y, + T const& a + ) + { + // get cosine of angle between vectors (-1 -> 1) + T CosAlpha = dot(x, y); + // get angle (0 -> pi) + T Alpha = acos(CosAlpha); + // get sine of angle between vectors (0 -> 1) + T SinAlpha = sin(Alpha); + // this breaks down when SinAlpha = 0, i.e. 
Alpha = 0 or pi + T t1 = sin((static_cast(1) - a) * Alpha) / SinAlpha; + T t2 = sin(a * Alpha) / SinAlpha; + + // interpolate src vectors + return x * t1 + y * t2; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> rotate + ( + vec<2, T, Q> const& v, + T const& angle + ) + { + vec<2, T, Q> Result; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.x = v.x * Cos - v.y * Sin; + Result.y = v.x * Sin + v.y * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotate + ( + vec<3, T, Q> const& v, + T const& angle, + vec<3, T, Q> const& normal + ) + { + return mat<3, 3, T, Q>(glm::rotate(angle, normal)) * v; + } + /* + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotateGTX( + const vec<3, T, Q>& x, + T angle, + const vec<3, T, Q>& normal) + { + const T Cos = cos(radians(angle)); + const T Sin = sin(radians(angle)); + return x * Cos + ((x * normal) * (T(1) - Cos)) * normal + cross(x, normal) * Sin; + } + */ + template + GLM_FUNC_QUALIFIER vec<4, T, Q> rotate + ( + vec<4, T, Q> const& v, + T const& angle, + vec<3, T, Q> const& normal + ) + { + return rotate(angle, normal) * v; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotateX + ( + vec<3, T, Q> const& v, + T const& angle + ) + { + vec<3, T, Q> Result(v); + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.y = v.y * Cos - v.z * Sin; + Result.z = v.y * Sin + v.z * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotateY + ( + vec<3, T, Q> const& v, + T const& angle + ) + { + vec<3, T, Q> Result = v; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.x = v.x * Cos + v.z * Sin; + Result.z = -v.x * Sin + v.z * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotateZ + ( + vec<3, T, Q> const& v, + T const& angle + ) + { + vec<3, T, Q> Result = v; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.x = v.x * Cos - v.y * Sin; + Result.y = v.x * Sin + v.y * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> rotateX + ( + vec<4, T, Q> const& v, + T const& angle + ) + { + vec<4, T, Q> Result = v; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.y = v.y * Cos - v.z * Sin; + Result.z = v.y * Sin + v.z * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> rotateY + ( + vec<4, T, Q> const& v, + T const& angle + ) + { + vec<4, T, Q> Result = v; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.x = v.x * Cos + v.z * Sin; + Result.z = -v.x * Sin + v.z * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> rotateZ + ( + vec<4, T, Q> const& v, + T const& angle + ) + { + vec<4, T, Q> Result = v; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.x = v.x * Cos - v.y * Sin; + Result.y = v.x * Sin + v.y * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> orientation + ( + vec<3, T, Q> const& Normal, + vec<3, T, Q> const& Up + ) + { + if(all(equal(Normal, Up, epsilon()))) + return mat<4, 4, T, Q>(static_cast(1)); + + vec<3, T, Q> RotationAxis = cross(Up, Normal); + T Angle = acos(dot(Normal, Up)); + + return rotate(Angle, RotationAxis); + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_multiplication.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_multiplication.hpp new file mode 100644 index 000000000000..9f9f2fb3edbc --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_multiplication.hpp @@ -0,0 +1,82 @@ +/// @ref gtx_scalar_multiplication +/// @file 
glm/gtx/scalar_multiplication.hpp
+/// @author Joshua Moerman
+///
+/// @defgroup gtx_scalar_multiplication GLM_GTX_scalar_multiplication
+/// @ingroup gtx
+///
+/// Include <glm/gtx/scalar_multiplication.hpp> to use the features of this extension.
+///
+/// Enables scalar multiplication for all types
+///
+/// Since GLSL is very strict about types, the following (often used) combinations do not work:
+///    double * vec4
+///    int * vec4
+///    vec4 / int
+/// So we'll fix that! Of course "float * vec4" should remain the same (hence the enable_if magic)
+
+#pragma once
+
+#include "../detail/setup.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_scalar_multiplication is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_scalar_multiplication extension included")
+#	endif
+#endif
+
+#include "../vec2.hpp"
+#include "../vec3.hpp"
+#include "../vec4.hpp"
+#include "../mat2x2.hpp"
+#include <type_traits>
+
+namespace glm
+{
+	/// @addtogroup gtx_scalar_multiplication
+	/// @{
+
+	template<typename T, typename Vec>
+	using return_type_scalar_multiplication = typename std::enable_if<
+		!std::is_same<T, float>::value       // T may not be a float
+		&& std::is_arithmetic<T>::value, Vec // But it may be an int or double (no vec3 or mat3, ...)
+	>::type;
+
+#define GLM_IMPLEMENT_SCAL_MULT(Vec) \
+	template<typename T> \
+	return_type_scalar_multiplication<T, Vec> \
+	operator*(T const& s, Vec rh){ \
+		return rh *= static_cast<float>(s); \
+	} \
+	\
+	template<typename T> \
+	return_type_scalar_multiplication<T, Vec> \
+	operator*(Vec lh, T const& s){ \
+		return lh *= static_cast<float>(s); \
+	} \
+	\
+	template<typename T> \
+	return_type_scalar_multiplication<T, Vec> \
+	operator/(Vec lh, T const& s){ \
+		return lh *= 1.0f / static_cast<float>(s); \
+	}
+
+GLM_IMPLEMENT_SCAL_MULT(vec2)
+GLM_IMPLEMENT_SCAL_MULT(vec3)
+GLM_IMPLEMENT_SCAL_MULT(vec4)
+
+GLM_IMPLEMENT_SCAL_MULT(mat2)
+GLM_IMPLEMENT_SCAL_MULT(mat2x3)
+GLM_IMPLEMENT_SCAL_MULT(mat2x4)
+GLM_IMPLEMENT_SCAL_MULT(mat3x2)
+GLM_IMPLEMENT_SCAL_MULT(mat3)
+GLM_IMPLEMENT_SCAL_MULT(mat3x4)
+GLM_IMPLEMENT_SCAL_MULT(mat4x2)
+GLM_IMPLEMENT_SCAL_MULT(mat4x3)
+GLM_IMPLEMENT_SCAL_MULT(mat4)
+
+#undef GLM_IMPLEMENT_SCAL_MULT
+	/// @}
+} // namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_relational.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_relational.hpp
new file mode 100644
index 000000000000..8be9c57b8b35
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_relational.hpp
@@ -0,0 +1,36 @@
+/// @ref gtx_scalar_relational
+/// @file glm/gtx/scalar_relational.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_scalar_relational GLM_GTX_scalar_relational
+/// @ingroup gtx
+///
+/// Include <glm/gtx/scalar_relational.hpp> to use the features of this extension.
+///
+/// Comparison functions for scalar values.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_scalar_relational is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_scalar_relational extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_scalar_relational
+	/// @{
+
+
+
+	/// @}
+}//namespace glm
+
+#include "scalar_relational.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_relational.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_relational.inl
new file mode 100644
index 000000000000..c2a121cff977
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/scalar_relational.inl
@@ -0,0 +1,88 @@
+/// @ref gtx_scalar_relational
+
+namespace glm
+{
+	template<typename T>
+	GLM_FUNC_QUALIFIER bool lessThan
+	(
+		T const& x,
+		T const& y
+	)
+	{
+		return x < y;
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER bool lessThanEqual
+	(
+		T const& x,
+		T const& y
+	)
+	{
+		return x <= y;
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER bool greaterThan
+	(
+		T const& x,
+		T const& y
+	)
+	{
+		return x > y;
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER bool greaterThanEqual
+	(
+		T const& x,
+		T const& y
+	)
+	{
+		return x >= y;
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER bool equal
+	(
+		T const& x,
+		T const& y
+	)
+	{
+		return detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(x, y);
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER bool notEqual
+	(
+		T const& x,
+		T const& y
+	)
+	{
+		return !detail::compute_equal<T, std::numeric_limits<T>::is_iec559>::call(x, y);
+	}
+
+	GLM_FUNC_QUALIFIER bool any
+	(
+		bool const& x
+	)
+	{
+		return x;
+	}
+
+	GLM_FUNC_QUALIFIER bool all
+	(
+		bool const& x
+	)
+	{
+		return x;
+	}
+
+	GLM_FUNC_QUALIFIER bool not_
+	(
+		bool const& x
+	)
+	{
+		return !x;
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/spline.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/spline.hpp
new file mode 100644
index 000000000000..731c979e358a
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/spline.hpp
@@ -0,0 +1,65 @@
+/// @ref gtx_spline
+/// @file glm/gtx/spline.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_spline GLM_GTX_spline
+/// @ingroup gtx
+///
+/// Include <glm/gtx/spline.hpp> to use the features of this extension.
+///
+/// Spline functions
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtx/optimum_pow.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_spline is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_spline extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_spline
+	/// @{
+
+	/// Return a point from a Catmull-Rom curve.
+	/// @see gtx_spline extension.
+	template<typename genType>
+	GLM_FUNC_DECL genType catmullRom(
+		genType const& v1,
+		genType const& v2,
+		genType const& v3,
+		genType const& v4,
+		typename genType::value_type const& s);
+
+	/// Return a point from a Hermite curve.
+	/// @see gtx_spline extension.
+	template<typename genType>
+	GLM_FUNC_DECL genType hermite(
+		genType const& v1,
+		genType const& t1,
+		genType const& v2,
+		genType const& t2,
+		typename genType::value_type const& s);
+
+	/// Return a point from a cubic curve.
+	/// @see gtx_spline extension.
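A usage sketch for the catmullRom declaration earlier in this header; s in [0, 1] sweeps the segment between the two middle control points, while the outer points only shape the tangents:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/spline.hpp>

    glm::vec3 samplePath(glm::vec3 const& p0, glm::vec3 const& p1,
                         glm::vec3 const& p2, glm::vec3 const& p3, float s)
    {
        // catmullRom(p0, p1, p2, p3, 0) == p1 and catmullRom(p0, p1, p2, p3, 1) == p2.
        return glm::catmullRom(p0, p1, p2, p3, s);
    }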
+ template + GLM_FUNC_DECL genType cubic( + genType const& v1, + genType const& v2, + genType const& v3, + genType const& v4, + typename genType::value_type const& s); + + /// @} +}//namespace glm + +#include "spline.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/spline.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/spline.inl new file mode 100644 index 000000000000..c3fd05656291 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/spline.inl @@ -0,0 +1,60 @@ +/// @ref gtx_spline + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType catmullRom + ( + genType const& v1, + genType const& v2, + genType const& v3, + genType const& v4, + typename genType::value_type const& s + ) + { + typename genType::value_type s2 = pow2(s); + typename genType::value_type s3 = pow3(s); + + typename genType::value_type f1 = -s3 + typename genType::value_type(2) * s2 - s; + typename genType::value_type f2 = typename genType::value_type(3) * s3 - typename genType::value_type(5) * s2 + typename genType::value_type(2); + typename genType::value_type f3 = typename genType::value_type(-3) * s3 + typename genType::value_type(4) * s2 + s; + typename genType::value_type f4 = s3 - s2; + + return (f1 * v1 + f2 * v2 + f3 * v3 + f4 * v4) / typename genType::value_type(2); + + } + + template + GLM_FUNC_QUALIFIER genType hermite + ( + genType const& v1, + genType const& t1, + genType const& v2, + genType const& t2, + typename genType::value_type const& s + ) + { + typename genType::value_type s2 = pow2(s); + typename genType::value_type s3 = pow3(s); + + typename genType::value_type f1 = typename genType::value_type(2) * s3 - typename genType::value_type(3) * s2 + typename genType::value_type(1); + typename genType::value_type f2 = typename genType::value_type(-2) * s3 + typename genType::value_type(3) * s2; + typename genType::value_type f3 = s3 - typename genType::value_type(2) * s2 + s; + typename genType::value_type f4 = s3 - s2; + + return f1 * v1 + f2 * v2 + f3 * t1 + f4 * t2; + } + + template + GLM_FUNC_QUALIFIER genType cubic + ( + genType const& v1, + genType const& v2, + genType const& v3, + genType const& v4, + typename genType::value_type const& s + ) + { + return ((v1 * s + v2) * s + v3) * s + v4; + } +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/std_based_type.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/std_based_type.hpp new file mode 100644 index 000000000000..cd3be8cb7892 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/std_based_type.hpp @@ -0,0 +1,68 @@ +/// @ref gtx_std_based_type +/// @file glm/gtx/std_based_type.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_std_based_type GLM_GTX_std_based_type +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Adds vector types based on STL value types. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_std_based_type is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_std_based_type extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_std_based_type + /// @{ + + /// Vector type based of one std::size_t component. 
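Back in spline.inl, one detail worth noting: cubic() is Horner's rule for the polynomial v1·s³ + v2·s² + v3·s + v4, three multiply-adds instead of explicit powers. A quick equivalence sketch:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtc/epsilon.hpp>
    #include <glm/gtx/spline.hpp>

    bool cubicMatchesPolynomial()
    {
        glm::vec3 const a(1.0f), b(2.0f), c(3.0f), d(4.0f);
        float const s = 0.5f;
        glm::vec3 lhs = glm::cubic(a, b, c, d, s);
        glm::vec3 rhs = a * (s * s * s) + b * (s * s) + c * s + d;
        return glm::all(glm::epsilonEqual(lhs, rhs, 1e-6f)); // expected: true
    }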
+ /// @see GLM_GTX_std_based_type + typedef vec<1, std::size_t, defaultp> size1; + + /// Vector type based of two std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<2, std::size_t, defaultp> size2; + + /// Vector type based of three std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<3, std::size_t, defaultp> size3; + + /// Vector type based of four std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<4, std::size_t, defaultp> size4; + + /// Vector type based of one std::size_t component. + /// @see GLM_GTX_std_based_type + typedef vec<1, std::size_t, defaultp> size1_t; + + /// Vector type based of two std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<2, std::size_t, defaultp> size2_t; + + /// Vector type based of three std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<3, std::size_t, defaultp> size3_t; + + /// Vector type based of four std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<4, std::size_t, defaultp> size4_t; + + /// @} +}//namespace glm + +#include "std_based_type.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/std_based_type.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/std_based_type.inl new file mode 100644 index 000000000000..9c34bdb6e0f7 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/std_based_type.inl @@ -0,0 +1,6 @@ +/// @ref gtx_std_based_type + +namespace glm +{ + +} diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/string_cast.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/string_cast.hpp new file mode 100644 index 000000000000..71f6ece4c9f1 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/string_cast.hpp @@ -0,0 +1,46 @@ +/// @ref gtx_string_cast +/// @file glm/gtx/string_cast.hpp +/// +/// @see core (dependence) +/// @see gtx_integer (dependence) +/// @see gtx_quaternion (dependence) +/// +/// @defgroup gtx_string_cast GLM_GTX_string_cast +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Setup strings for GLM type values + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/type_precision.hpp" +#include "../gtc/quaternion.hpp" +#include "../gtx/dual_quaternion.hpp" +#include +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_string_cast is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_string_cast extension included") +# endif +#endif + +namespace glm +{ + /// @addtogroup gtx_string_cast + /// @{ + + /// Create a string from a GLM vector or matrix typed variable. + /// @see gtx_string_cast extension. + template + GLM_FUNC_DECL std::string to_string(genType const& x); + + /// @} +}//namespace glm + +#include "string_cast.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/string_cast.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/string_cast.inl new file mode 100644 index 000000000000..0b2d4b6aebc6 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/string_cast.inl @@ -0,0 +1,492 @@ +/// @ref gtx_string_cast + +#include +#include + +namespace glm{ +namespace detail +{ + template + struct cast + { + typedef T value_type; + }; + + template <> + struct cast + { + typedef double value_type; + }; + + GLM_FUNC_QUALIFIER std::string format(const char* msg, ...) 
+ { + std::size_t const STRING_BUFFER(4096); + char text[STRING_BUFFER]; + va_list list; + + if(msg == GLM_NULLPTR) + return std::string(); + + va_start(list, msg); +# if (GLM_COMPILER & GLM_COMPILER_VC) + vsprintf_s(text, STRING_BUFFER, msg, list); +# else// + std::vsnprintf(text, STRING_BUFFER, msg, list); +# endif// + va_end(list); + + return std::string(text); + } + + static const char* LabelTrue = "true"; + static const char* LabelFalse = "false"; + + template + struct literal + { + GLM_FUNC_QUALIFIER static char const * value() {return "%d";} + }; + + template + struct literal + { + GLM_FUNC_QUALIFIER static char const * value() {return "%f";} + }; + +# if GLM_MODEL == GLM_MODEL_32 && GLM_COMPILER && GLM_COMPILER_VC + template<> + struct literal + { + GLM_FUNC_QUALIFIER static char const * value() {return "%lld";} + }; + + template<> + struct literal + { + GLM_FUNC_QUALIFIER static char const * value() {return "%lld";} + }; +# endif//GLM_MODEL == GLM_MODEL_32 && GLM_COMPILER && GLM_COMPILER_VC + + template + struct prefix{}; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "d";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "b";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "u8";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "i8";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "u16";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "i16";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "u";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "i";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "u64";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "i64";} + }; + + template + struct compute_to_string + {}; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<1, bool, Q> const& x) + { + return detail::format("bvec1(%s)", + x[0] ? detail::LabelTrue : detail::LabelFalse); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<2, bool, Q> const& x) + { + return detail::format("bvec2(%s, %s)", + x[0] ? detail::LabelTrue : detail::LabelFalse, + x[1] ? detail::LabelTrue : detail::LabelFalse); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<3, bool, Q> const& x) + { + return detail::format("bvec3(%s, %s, %s)", + x[0] ? detail::LabelTrue : detail::LabelFalse, + x[1] ? detail::LabelTrue : detail::LabelFalse, + x[2] ? detail::LabelTrue : detail::LabelFalse); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<4, bool, Q> const& x) + { + return detail::format("bvec4(%s, %s, %s, %s)", + x[0] ? detail::LabelTrue : detail::LabelFalse, + x[1] ? detail::LabelTrue : detail::LabelFalse, + x[2] ? detail::LabelTrue : detail::LabelFalse, + x[3] ? 
detail::LabelTrue : detail::LabelFalse);
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<vec<1, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(vec<1, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%svec1(%s)",
+				PrefixStr,
+				LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<vec<2, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(vec<2, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%svec2(%s, %s)",
+				PrefixStr,
+				LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0]),
+				static_cast<typename cast<T>::value_type>(x[1]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<vec<3, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(vec<3, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%svec3(%s, %s, %s)",
+				PrefixStr,
+				LiteralStr, LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0]),
+				static_cast<typename cast<T>::value_type>(x[1]),
+				static_cast<typename cast<T>::value_type>(x[2]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<vec<4, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(vec<4, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%svec4(%s, %s, %s, %s)",
+				PrefixStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0]),
+				static_cast<typename cast<T>::value_type>(x[1]),
+				static_cast<typename cast<T>::value_type>(x[2]),
+				static_cast<typename cast<T>::value_type>(x[3]));
+		}
+	};
+
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<mat<2, 2, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(mat<2, 2, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%smat2x2((%s, %s), (%s, %s))",
+				PrefixStr,
+				LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]),
+				static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<mat<2, 3, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(mat<2, 3, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%smat2x3((%s, %s, %s), (%s, %s, %s))",
+				PrefixStr,
+				LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]),
+				static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<mat<2, 4, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(mat<2, 4, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%smat2x4((%s, %s, %s, %s), (%s, %s, %s, %s))",
+				PrefixStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]), static_cast<typename cast<T>::value_type>(x[0][3]),
+				static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]), static_cast<typename cast<T>::value_type>(x[1][3]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<mat<3, 2, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(mat<3, 2, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%smat3x2((%s, %s), (%s, %s), (%s, %s))",
+				PrefixStr,
+				LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]),
+				static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]),
+				static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<mat<3, 3, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(mat<3, 3, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%smat3x3((%s, %s, %s), (%s, %s, %s), (%s, %s, %s))",
+				PrefixStr,
+				LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]),
+				static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]),
+				static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]), static_cast<typename cast<T>::value_type>(x[2][2]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<mat<3, 4, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(mat<3, 4, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%smat3x4((%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s))",
+				PrefixStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]), static_cast<typename cast<T>::value_type>(x[0][3]),
+				static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]), static_cast<typename cast<T>::value_type>(x[1][3]),
+				static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]), static_cast<typename cast<T>::value_type>(x[2][2]), static_cast<typename cast<T>::value_type>(x[2][3]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<mat<4, 2, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(mat<4, 2, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%smat4x2((%s, %s), (%s, %s), (%s, %s), (%s, %s))",
+				PrefixStr,
+				LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]),
+				static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]),
+				static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]),
+				static_cast<typename cast<T>::value_type>(x[3][0]), static_cast<typename cast<T>::value_type>(x[3][1]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<mat<4, 3, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(mat<4, 3, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%smat4x3((%s, %s, %s), (%s, %s, %s), (%s, %s, %s), (%s, %s, %s))",
+				PrefixStr,
+				LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]),
+				static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]),
+				static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]), static_cast<typename cast<T>::value_type>(x[2][2]),
+				static_cast<typename cast<T>::value_type>(x[3][0]), static_cast<typename cast<T>::value_type>(x[3][1]), static_cast<typename cast<T>::value_type>(x[3][2]));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<mat<4, 4, T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(mat<4, 4, T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%smat4x4((%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s))",
+				PrefixStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x[0][0]), static_cast<typename cast<T>::value_type>(x[0][1]), static_cast<typename cast<T>::value_type>(x[0][2]), static_cast<typename cast<T>::value_type>(x[0][3]),
+				static_cast<typename cast<T>::value_type>(x[1][0]), static_cast<typename cast<T>::value_type>(x[1][1]), static_cast<typename cast<T>::value_type>(x[1][2]), static_cast<typename cast<T>::value_type>(x[1][3]),
+				static_cast<typename cast<T>::value_type>(x[2][0]), static_cast<typename cast<T>::value_type>(x[2][1]), static_cast<typename cast<T>::value_type>(x[2][2]), static_cast<typename cast<T>::value_type>(x[2][3]),
+				static_cast<typename cast<T>::value_type>(x[3][0]), static_cast<typename cast<T>::value_type>(x[3][1]), static_cast<typename cast<T>::value_type>(x[3][2]), static_cast<typename cast<T>::value_type>(x[3][3]));
+		}
+	};
+
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<qua<T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(qua<T, Q> const& q)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%squat(%s, {%s, %s, %s})",
+				PrefixStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(q.w),
+				static_cast<typename cast<T>::value_type>(q.x),
+				static_cast<typename cast<T>::value_type>(q.y),
+				static_cast<typename cast<T>::value_type>(q.z));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_to_string<tdualquat<T, Q> >
+	{
+		GLM_FUNC_QUALIFIER static std::string call(tdualquat<T, Q> const& x)
+		{
+			char const * PrefixStr = prefix<T>::value();
+			char const * LiteralStr = literal<T, std::numeric_limits<T>::is_iec559>::value();
+			std::string FormatStr(detail::format("%sdualquat((%s, {%s, %s, %s}), (%s, {%s, %s, %s}))",
+				PrefixStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr,
+				LiteralStr, LiteralStr, LiteralStr, LiteralStr));
+
+			return detail::format(FormatStr.c_str(),
+				static_cast<typename cast<T>::value_type>(x.real.w),
+				static_cast<typename cast<T>::value_type>(x.real.x),
+				static_cast<typename cast<T>::value_type>(x.real.y),
+				static_cast<typename cast<T>::value_type>(x.real.z),
+				static_cast<typename cast<T>::value_type>(x.dual.w),
+				static_cast<typename cast<T>::value_type>(x.dual.x),
+				static_cast<typename cast<T>::value_type>(x.dual.y),
+				static_cast<typename cast<T>::value_type>(x.dual.z));
+		}
+	};
+
+}//namespace detail
+
+template<typename matType>
+GLM_FUNC_QUALIFIER std::string to_string(matType const& x)
+{
+	return detail::compute_to_string<matType>::call(x);
+}
+
+}//namespace glm
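For reference, a minimal usage sketch of the `glm::to_string` entry point above, which dispatches through the `detail::compute_to_string` specializations. This is not part of the patch; it assumes GLM is on the include path and that the experimental GTX extensions are enabled:

	// Hedged sketch, not part of the patch: exercising glm::to_string.
	#define GLM_ENABLE_EXPERIMENTAL
	#include <cstdio>
	#include <glm/glm.hpp>
	#include <glm/gtx/string_cast.hpp>

	int main()
	{
		glm::vec3 const v(1.0f, 2.0f, 3.0f);
		glm::mat2 const m(1.0f); // identity
		// prefix<T>/literal<T> pick the "%f" literal for float, so this prints
		// e.g. "vec3(1.000000, 2.000000, 3.000000)".
		std::printf("%s\n", glm::to_string(v).c_str());
		std::printf("%s\n", glm::to_string(m).c_str());
		return 0;
	}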
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/texture.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/texture.hpp
new file mode 100644
index 000000000000..20585e68ce11
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/texture.hpp
@@ -0,0 +1,46 @@
+/// @ref gtx_texture
+/// @file glm/gtx/texture.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_texture GLM_GTX_texture
+/// @ingroup gtx
+///
+/// Include to use the features of this extension.
+///
+/// Wrapping mode of texture coordinates.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/integer.hpp"
+#include "../gtx/component_wise.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_texture is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_texture extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_texture
+	/// @{
+
+	/// Compute the number of mipmaps levels necessary to create a mipmap complete texture
+	///
+	/// @param Extent Extent of the texture base level mipmap
+	/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
+	/// @tparam T Floating-point or signed integer scalar types
+	/// @tparam Q Value from qualifier enum
+	template<length_t L, typename T, qualifier Q>
+	T levels(vec<L, T, Q> const& Extent);
+
+	/// @}
+}// namespace glm
+
+#include "texture.inl"
+
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/texture.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/texture.inl
new file mode 100644
index 000000000000..593c826141b0
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/texture.inl
@@ -0,0 +1,17 @@
+/// @ref gtx_texture
+
+namespace glm
+{
+	template<length_t L, typename T, qualifier Q>
+	inline T levels(vec<L, T, Q> const& Extent)
+	{
+		return glm::log2(compMax(Extent)) + static_cast<T>(1);
+	}
+
+	template<typename T>
+	inline T levels(T Extent)
+	{
+		return vec<1, T, defaultp>(Extent).x;
+	}
+}//namespace glm
+
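The `levels` helper above is simply `log2(compMax(Extent)) + 1`. A small sketch of the arithmetic, with an illustrative extent (not part of the patch):

	// Hedged sketch, not part of the patch: mip level count for a texture.
	#define GLM_ENABLE_EXPERIMENTAL
	#include <cassert>
	#include <glm/glm.hpp>
	#include <glm/gtx/texture.hpp>

	int main()
	{
		// A 1024x512 base level needs log2(max(1024, 512)) + 1 = 10 + 1 = 11
		// mip levels to be mipmap complete.
		glm::ivec2 const Extent(1024, 512);
		assert(glm::levels(Extent) == 11);
		return 0;
	}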
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/transform.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/transform.hpp
new file mode 100644
index 000000000000..0279fc8bd329
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/transform.hpp
@@ -0,0 +1,60 @@
+/// @ref gtx_transform
+/// @file glm/gtx/transform.hpp
+///
+/// @see core (dependence)
+/// @see gtc_matrix_transform (dependence)
+/// @see gtx_transform
+/// @see gtx_transform2
+///
+/// @defgroup gtx_transform GLM_GTX_transform
+/// @ingroup gtx
+///
+/// Include to use the features of this extension.
+///
+/// Add transformation matrices
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/matrix_transform.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_transform is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_transform extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_transform
+	/// @{
+
+	/// Transforms a matrix with a translation 4 * 4 matrix created from 3 scalars.
+	/// @see gtc_matrix_transform
+	/// @see gtx_transform
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> translate(
+		vec<3, T, Q> const& v);
+
+	/// Builds a rotation 4 * 4 matrix created from an axis of 3 scalars and an angle expressed in radians.
+	/// @see gtc_matrix_transform
+	/// @see gtx_transform
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> rotate(
+		T angle,
+		vec<3, T, Q> const& v);
+
+	/// Transforms a matrix with a scale 4 * 4 matrix created from a vector of 3 components.
+	/// @see gtc_matrix_transform
+	/// @see gtx_transform
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> scale(
+		vec<3, T, Q> const& v);
+
+	/// @}
+}// namespace glm
+
+#include "transform.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/transform.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/transform.inl
new file mode 100644
index 000000000000..48ee6801b651
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/transform.inl
@@ -0,0 +1,23 @@
+/// @ref gtx_transform
+
+namespace glm
+{
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> translate(vec<3, T, Q> const& v)
+	{
+		return translate(mat<4, 4, T, Q>(static_cast<T>(1)), v);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate(T angle, vec<3, T, Q> const& v)
+	{
+		return rotate(mat<4, 4, T, Q>(static_cast<T>(1)), angle, v);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale(vec<3, T, Q> const& v)
+	{
+		return scale(mat<4, 4, T, Q>(static_cast<T>(1)), v);
+	}
+
+}//namespace glm
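Each wrapper above just applies its gtc_matrix_transform counterpart to an identity matrix, so a model matrix composes by plain multiplication. A hedged sketch with illustrative values (not part of the patch):

	// Hedged sketch, not part of the patch: composing a TRS model matrix.
	#define GLM_ENABLE_EXPERIMENTAL
	#include <glm/glm.hpp>
	#include <glm/gtc/constants.hpp>
	#include <glm/gtx/transform.hpp>

	int main()
	{
		// translate/rotate/scale(v) == translate/rotate/scale(mat4(1), ...).
		glm::mat4 const M = glm::translate(glm::vec3(1.0f, 2.0f, 3.0f))
			* glm::rotate(glm::half_pi<float>(), glm::vec3(0.0f, 1.0f, 0.0f))
			* glm::scale(glm::vec3(2.0f));
		glm::vec4 const p = M * glm::vec4(1.0f, 0.0f, 0.0f, 1.0f);
		// Scale: (2,0,0); rotate +90 deg about Y: (0,0,-2); translate: (1,2,1).
		(void)p;
		return 0;
	}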
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/transform2.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/transform2.hpp
new file mode 100644
index 000000000000..0d8ba9d90bc5
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/transform2.hpp
@@ -0,0 +1,89 @@
+/// @ref gtx_transform2
+/// @file glm/gtx/transform2.hpp
+///
+/// @see core (dependence)
+/// @see gtx_transform (dependence)
+///
+/// @defgroup gtx_transform2 GLM_GTX_transform2
+/// @ingroup gtx
+///
+/// Include to use the features of this extension.
+///
+/// Add extra transformation matrices
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtx/transform.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_transform2 is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_transform2 extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_transform2
+	/// @{
+
+	//! Transforms a matrix with a shearing on X axis.
+	//! From GLM_GTX_transform2 extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T y);
+
+	//! Transforms a matrix with a shearing on Y axis.
+	//! From GLM_GTX_transform2 extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T x);
+
+	//! Transforms a matrix with a shearing on X axis
+	//! From GLM_GTX_transform2 extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T y, T z);
+
+	//! Transforms a matrix with a shearing on Y axis.
+	//! From GLM_GTX_transform2 extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T x, T z);
+
+	//! Transforms a matrix with a shearing on Z axis.
+	//! From GLM_GTX_transform2 extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> shearZ3D(mat<4, 4, T, Q> const& m, T x, T y);
+
+	//template<typename T> GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear(const mat<4, 4, T, Q> & m, shearPlane, planePoint, angle)
+	// Identity + tan(angle) * cross(Normal, OnPlaneVector)	0
+	// - dot(PointOnPlane, normal) * OnPlaneVector			1
+
+	// Reflect functions seem to don't work
+	//template<typename T> mat<3, 3, T, Q> reflect2D(const mat<3, 3, T, Q> & m, const vec<3, T, Q>& normal){return reflect2DGTX(m, normal);}	//!< \brief Build a reflection matrix (from GLM_GTX_transform2 extension)
+	//template<typename T> mat<4, 4, T, Q> reflect3D(const mat<4, 4, T, Q> & m, const vec<3, T, Q>& normal){return reflect3DGTX(m, normal);}	//!< \brief Build a reflection matrix (from GLM_GTX_transform2 extension)
+
+	//! Build planar projection matrix along normal axis.
+	//! From GLM_GTX_transform2 extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<3, 3, T, Q> proj2D(mat<3, 3, T, Q> const& m, vec<3, T, Q> const& normal);
+
+	//! Build planar projection matrix along normal axis.
+	//! From GLM_GTX_transform2 extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> proj3D(mat<4, 4, T, Q> const & m, vec<3, T, Q> const& normal);
+
+	//! Build a scale bias matrix.
+	//! From GLM_GTX_transform2 extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> scaleBias(T scale, T bias);
+
+	//! Build a scale bias matrix.
+	//! From GLM_GTX_transform2 extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL mat<4, 4, T, Q> scaleBias(mat<4, 4, T, Q> const& m, T scale, T bias);
+
+	/// @}
+}// namespace glm
+
+#include "transform2.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/transform2.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/transform2.inl
new file mode 100644
index 000000000000..0118ab09603a
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/transform2.inl
@@ -0,0 +1,125 @@
+/// @ref gtx_transform2
+
+namespace glm
+{
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T s)
+	{
+		mat<3, 3, T, Q> r(1);
+		r[1][0] = s;
+		return m * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T s)
+	{
+		mat<3, 3, T, Q> r(1);
+		r[0][1] = s;
+		return m * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T s, T t)
+	{
+		mat<4, 4, T, Q> r(1);
+		r[0][1] = s;
+		r[0][2] = t;
+		return m * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T s, T t)
+	{
+		mat<4, 4, T, Q> r(1);
+		r[1][0] = s;
+		r[1][2] = t;
+		return m * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearZ3D(mat<4, 4, T, Q> const& m, T s, T t)
+	{
+		mat<4, 4, T, Q> r(1);
+		r[2][0] = s;
+		r[2][1] = t;
+		return m * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<3, 3, T, Q> reflect2D(mat<3, 3, T, Q> const& m, vec<3, T, Q> const& normal)
+	{
+		mat<3, 3, T, Q> r(static_cast<T>(1));
+		r[0][0] = static_cast<T>(1) - static_cast<T>(2) * normal.x * normal.x;
+		r[0][1] = -static_cast<T>(2) * normal.x * normal.y;
+		r[1][0] = -static_cast<T>(2) * normal.x * normal.y;
+		r[1][1] = static_cast<T>(1) - static_cast<T>(2) * normal.y * normal.y;
+		return m * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> reflect3D(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& normal)
+	{
+		mat<4, 4, T, Q> r(static_cast<T>(1));
+		r[0][0] = static_cast<T>(1) - static_cast<T>(2) * normal.x * normal.x;
+		r[0][1] = -static_cast<T>(2) * normal.x * normal.y;
+		r[0][2] = -static_cast<T>(2) * normal.x * normal.z;
+
+		r[1][0] = -static_cast<T>(2) * normal.x * normal.y;
+		r[1][1] = static_cast<T>(1) - static_cast<T>(2) * normal.y * normal.y;
+		r[1][2] = -static_cast<T>(2) * normal.y * normal.z;
+
+		r[2][0] = -static_cast<T>(2) * normal.x * normal.z;
+		r[2][1] = -static_cast<T>(2) * normal.y * normal.z;
+		r[2][2] = static_cast<T>(1) - static_cast<T>(2) * normal.z * normal.z;
+		return m * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<3, 3, T, Q> proj2D(
+		const mat<3, 3, T, Q>& m,
+		const vec<3, T, Q>& normal)
+	{
+		mat<3, 3, T, Q> r(static_cast<T>(1));
+		r[0][0] = static_cast<T>(1) - normal.x * normal.x;
+		r[0][1] = - normal.x * normal.y;
+		r[1][0] = - normal.x * normal.y;
+		r[1][1] = static_cast<T>(1) - normal.y * normal.y;
+		return m * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> proj3D(
+		const mat<4, 4, T, Q>& m,
+		const vec<3, T, Q>& normal)
+	{
+		mat<4, 4, T, Q> r(static_cast<T>(1));
+		r[0][0] = static_cast<T>(1) - normal.x * normal.x;
+		r[0][1] = - normal.x * normal.y;
+		r[0][2] = - normal.x * normal.z;
+		r[1][0] = - normal.x * normal.y;
+		r[1][1] = static_cast<T>(1) - normal.y * normal.y;
+		r[1][2] = - normal.y * normal.z;
+		r[2][0] = - normal.x * normal.z;
+		r[2][1] = - normal.y * normal.z;
+		r[2][2] = static_cast<T>(1) - normal.z * normal.z;
+		return m * r;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scaleBias(T scale, T bias)
+	{
+		mat<4, 4, T, Q> result;
+		result[3] = vec<4, T, Q>(vec<3, T, Q>(bias), static_cast<T>(1));
+		result[0][0] = scale;
+		result[1][1] = scale;
+		result[2][2] = scale;
+		return result;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scaleBias(mat<4, 4, T, Q> const& m, T scale, T bias)
+	{
+		return m * scaleBias<T, Q>(scale, bias);
+	}
+}//namespace glm
+
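Because each shear helper above post-multiplies `m` by an identity matrix with one off-diagonal entry set (column-major, so `r[1][0]` is row 0 of column 1), `shearX2D` maps x to x + s*y. A small sketch with illustrative values (not part of the patch):

	// Hedged sketch, not part of the patch: 2D shear on homogeneous points.
	#define GLM_ENABLE_EXPERIMENTAL
	#include <glm/glm.hpp>
	#include <glm/gtx/transform2.hpp>

	int main()
	{
		// r[1][0] = 0.5 gives x' = x + 0.5 * y.
		glm::mat3 const Sh = glm::shearX2D(glm::mat3(1.0f), 0.5f);
		glm::vec3 const p = Sh * glm::vec3(2.0f, 4.0f, 1.0f);
		// p == (2 + 0.5 * 4, 4, 1) == (4, 4, 1).
		(void)p;
		return 0;
	}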
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/type_aligned.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/type_aligned.hpp
new file mode 100644
index 000000000000..2ae522c1fc7e
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/type_aligned.hpp
@@ -0,0 +1,982 @@
+/// @ref gtx_type_aligned
+/// @file glm/gtx/type_aligned.hpp
+///
+/// @see core (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtx_type_aligned GLM_GTX_type_aligned
+/// @ingroup gtx
+///
+/// Include to use the features of this extension.
+///
+/// Defines aligned types.
+
+#pragma once
+
+// Dependency:
+#include "../gtc/type_precision.hpp"
+#include "../gtc/quaternion.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_type_aligned is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_type_aligned extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	///////////////////////////
+	// Signed int vector types
+
+	/// @addtogroup gtx_type_aligned
+	/// @{
+
+	/// Low qualifier 8 bit signed integer aligned scalar type.
+	/// @see gtx_type_aligned
+	GLM_ALIGNED_TYPEDEF(lowp_int8, aligned_lowp_int8, 1);
+
+	/// Low qualifier 16 bit signed integer aligned scalar type.
+	/// @see gtx_type_aligned
+	GLM_ALIGNED_TYPEDEF(lowp_int16, aligned_lowp_int16, 2);
+
+	/// Low qualifier 32 bit signed integer aligned scalar type.
+	/// @see gtx_type_aligned
+	GLM_ALIGNED_TYPEDEF(lowp_int32, aligned_lowp_int32, 4);
+
+	/// Low qualifier 64 bit signed integer aligned scalar type.
+	/// @see gtx_type_aligned
+	GLM_ALIGNED_TYPEDEF(lowp_int64, aligned_lowp_int64, 8);
+
+
+	/// Low qualifier 8 bit signed integer aligned scalar type.
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int8_t, aligned_lowp_int8_t, 1); + + /// Low qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int16_t, aligned_lowp_int16_t, 2); + + /// Low qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int32_t, aligned_lowp_int32_t, 4); + + /// Low qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int64_t, aligned_lowp_int64_t, 8); + + + /// Low qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_i8, aligned_lowp_i8, 1); + + /// Low qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_i16, aligned_lowp_i16, 2); + + /// Low qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_i32, aligned_lowp_i32, 4); + + /// Low qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_i64, aligned_lowp_i64, 8); + + + /// Medium qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int8, aligned_mediump_int8, 1); + + /// Medium qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int16, aligned_mediump_int16, 2); + + /// Medium qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int32, aligned_mediump_int32, 4); + + /// Medium qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int64, aligned_mediump_int64, 8); + + + /// Medium qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int8_t, aligned_mediump_int8_t, 1); + + /// Medium qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int16_t, aligned_mediump_int16_t, 2); + + /// Medium qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int32_t, aligned_mediump_int32_t, 4); + + /// Medium qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int64_t, aligned_mediump_int64_t, 8); + + + /// Medium qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_i8, aligned_mediump_i8, 1); + + /// Medium qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_i16, aligned_mediump_i16, 2); + + /// Medium qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_i32, aligned_mediump_i32, 4); + + /// Medium qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_i64, aligned_mediump_i64, 8); + + + /// High qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int8, aligned_highp_int8, 1); + + /// High qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int16, aligned_highp_int16, 2); + + /// High qualifier 32 bit signed integer aligned scalar type. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int32, aligned_highp_int32, 4); + + /// High qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int64, aligned_highp_int64, 8); + + + /// High qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int8_t, aligned_highp_int8_t, 1); + + /// High qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int16_t, aligned_highp_int16_t, 2); + + /// High qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int32_t, aligned_highp_int32_t, 4); + + /// High qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int64_t, aligned_highp_int64_t, 8); + + + /// High qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_i8, aligned_highp_i8, 1); + + /// High qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_i16, aligned_highp_i16, 2); + + /// High qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_i32, aligned_highp_i32, 4); + + /// High qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_i64, aligned_highp_i64, 8); + + + /// Default qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int8, aligned_int8, 1); + + /// Default qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int16, aligned_int16, 2); + + /// Default qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int32, aligned_int32, 4); + + /// Default qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int64, aligned_int64, 8); + + + /// Default qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int8_t, aligned_int8_t, 1); + + /// Default qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int16_t, aligned_int16_t, 2); + + /// Default qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int32_t, aligned_int32_t, 4); + + /// Default qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int64_t, aligned_int64_t, 8); + + + /// Default qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i8, aligned_i8, 1); + + /// Default qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i16, aligned_i16, 2); + + /// Default qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i32, aligned_i32, 4); + + /// Default qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i64, aligned_i64, 8); + + + /// Default qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(ivec1, aligned_ivec1, 4); + + /// Default qualifier 32 bit signed integer aligned vector of 2 components type. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(ivec2, aligned_ivec2, 8); + + /// Default qualifier 32 bit signed integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(ivec3, aligned_ivec3, 16); + + /// Default qualifier 32 bit signed integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(ivec4, aligned_ivec4, 16); + + + /// Default qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i8vec1, aligned_i8vec1, 1); + + /// Default qualifier 8 bit signed integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i8vec2, aligned_i8vec2, 2); + + /// Default qualifier 8 bit signed integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i8vec3, aligned_i8vec3, 4); + + /// Default qualifier 8 bit signed integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i8vec4, aligned_i8vec4, 4); + + + /// Default qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i16vec1, aligned_i16vec1, 2); + + /// Default qualifier 16 bit signed integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i16vec2, aligned_i16vec2, 4); + + /// Default qualifier 16 bit signed integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i16vec3, aligned_i16vec3, 8); + + /// Default qualifier 16 bit signed integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i16vec4, aligned_i16vec4, 8); + + + /// Default qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i32vec1, aligned_i32vec1, 4); + + /// Default qualifier 32 bit signed integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i32vec2, aligned_i32vec2, 8); + + /// Default qualifier 32 bit signed integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i32vec3, aligned_i32vec3, 16); + + /// Default qualifier 32 bit signed integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i32vec4, aligned_i32vec4, 16); + + + /// Default qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i64vec1, aligned_i64vec1, 8); + + /// Default qualifier 64 bit signed integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i64vec2, aligned_i64vec2, 16); + + /// Default qualifier 64 bit signed integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i64vec3, aligned_i64vec3, 32); + + /// Default qualifier 64 bit signed integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i64vec4, aligned_i64vec4, 32); + + + ///////////////////////////// + // Unsigned int vector types + + /// Low qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint8, aligned_lowp_uint8, 1); + + /// Low qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint16, aligned_lowp_uint16, 2); + + /// Low qualifier 32 bit unsigned integer aligned scalar type. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint32, aligned_lowp_uint32, 4); + + /// Low qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint64, aligned_lowp_uint64, 8); + + + /// Low qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint8_t, aligned_lowp_uint8_t, 1); + + /// Low qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint16_t, aligned_lowp_uint16_t, 2); + + /// Low qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint32_t, aligned_lowp_uint32_t, 4); + + /// Low qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint64_t, aligned_lowp_uint64_t, 8); + + + /// Low qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_u8, aligned_lowp_u8, 1); + + /// Low qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_u16, aligned_lowp_u16, 2); + + /// Low qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_u32, aligned_lowp_u32, 4); + + /// Low qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_u64, aligned_lowp_u64, 8); + + + /// Medium qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint8, aligned_mediump_uint8, 1); + + /// Medium qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint16, aligned_mediump_uint16, 2); + + /// Medium qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint32, aligned_mediump_uint32, 4); + + /// Medium qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint64, aligned_mediump_uint64, 8); + + + /// Medium qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint8_t, aligned_mediump_uint8_t, 1); + + /// Medium qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint16_t, aligned_mediump_uint16_t, 2); + + /// Medium qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint32_t, aligned_mediump_uint32_t, 4); + + /// Medium qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint64_t, aligned_mediump_uint64_t, 8); + + + /// Medium qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_u8, aligned_mediump_u8, 1); + + /// Medium qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_u16, aligned_mediump_u16, 2); + + /// Medium qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_u32, aligned_mediump_u32, 4); + + /// Medium qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_u64, aligned_mediump_u64, 8); + + + /// High qualifier 8 bit unsigned integer aligned scalar type. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint8, aligned_highp_uint8, 1); + + /// High qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint16, aligned_highp_uint16, 2); + + /// High qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint32, aligned_highp_uint32, 4); + + /// High qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint64, aligned_highp_uint64, 8); + + + /// High qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint8_t, aligned_highp_uint8_t, 1); + + /// High qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint16_t, aligned_highp_uint16_t, 2); + + /// High qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint32_t, aligned_highp_uint32_t, 4); + + /// High qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint64_t, aligned_highp_uint64_t, 8); + + + /// High qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_u8, aligned_highp_u8, 1); + + /// High qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_u16, aligned_highp_u16, 2); + + /// High qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_u32, aligned_highp_u32, 4); + + /// High qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_u64, aligned_highp_u64, 8); + + + /// Default qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint8, aligned_uint8, 1); + + /// Default qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint16, aligned_uint16, 2); + + /// Default qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint32, aligned_uint32, 4); + + /// Default qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint64, aligned_uint64, 8); + + + /// Default qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint8_t, aligned_uint8_t, 1); + + /// Default qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint16_t, aligned_uint16_t, 2); + + /// Default qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint32_t, aligned_uint32_t, 4); + + /// Default qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint64_t, aligned_uint64_t, 8); + + + /// Default qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u8, aligned_u8, 1); + + /// Default qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u16, aligned_u16, 2); + + /// Default qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u32, aligned_u32, 4); + + /// Default qualifier 64 bit unsigned integer aligned scalar type. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u64, aligned_u64, 8); + + + /// Default qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uvec1, aligned_uvec1, 4); + + /// Default qualifier 32 bit unsigned integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uvec2, aligned_uvec2, 8); + + /// Default qualifier 32 bit unsigned integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uvec3, aligned_uvec3, 16); + + /// Default qualifier 32 bit unsigned integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uvec4, aligned_uvec4, 16); + + + /// Default qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u8vec1, aligned_u8vec1, 1); + + /// Default qualifier 8 bit unsigned integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u8vec2, aligned_u8vec2, 2); + + /// Default qualifier 8 bit unsigned integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u8vec3, aligned_u8vec3, 4); + + /// Default qualifier 8 bit unsigned integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u8vec4, aligned_u8vec4, 4); + + + /// Default qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u16vec1, aligned_u16vec1, 2); + + /// Default qualifier 16 bit unsigned integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u16vec2, aligned_u16vec2, 4); + + /// Default qualifier 16 bit unsigned integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u16vec3, aligned_u16vec3, 8); + + /// Default qualifier 16 bit unsigned integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u16vec4, aligned_u16vec4, 8); + + + /// Default qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u32vec1, aligned_u32vec1, 4); + + /// Default qualifier 32 bit unsigned integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u32vec2, aligned_u32vec2, 8); + + /// Default qualifier 32 bit unsigned integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u32vec3, aligned_u32vec3, 16); + + /// Default qualifier 32 bit unsigned integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u32vec4, aligned_u32vec4, 16); + + + /// Default qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u64vec1, aligned_u64vec1, 8); + + /// Default qualifier 64 bit unsigned integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u64vec2, aligned_u64vec2, 16); + + /// Default qualifier 64 bit unsigned integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u64vec3, aligned_u64vec3, 32); + + /// Default qualifier 64 bit unsigned integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u64vec4, aligned_u64vec4, 32); + + + ////////////////////// + // Float vector types + + /// 32 bit single-qualifier floating-point aligned scalar. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float32, aligned_float32, 4); + + /// 32 bit single-qualifier floating-point aligned scalar. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float32_t, aligned_float32_t, 4); + + /// 32 bit single-qualifier floating-point aligned scalar. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float32, aligned_f32, 4); + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// 64 bit double-qualifier floating-point aligned scalar. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float64, aligned_float64, 8); + + /// 64 bit double-qualifier floating-point aligned scalar. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float64_t, aligned_float64_t, 8); + + /// 64 bit double-qualifier floating-point aligned scalar. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float64, aligned_f64, 8); + +# endif//GLM_FORCE_SINGLE_ONLY + + + /// Single-qualifier floating-point aligned vector of 1 component. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(vec1, aligned_vec1, 4); + + /// Single-qualifier floating-point aligned vector of 2 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(vec2, aligned_vec2, 8); + + /// Single-qualifier floating-point aligned vector of 3 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(vec3, aligned_vec3, 16); + + /// Single-qualifier floating-point aligned vector of 4 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(vec4, aligned_vec4, 16); + + + /// Single-qualifier floating-point aligned vector of 1 component. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fvec1, aligned_fvec1, 4); + + /// Single-qualifier floating-point aligned vector of 2 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fvec2, aligned_fvec2, 8); + + /// Single-qualifier floating-point aligned vector of 3 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fvec3, aligned_fvec3, 16); + + /// Single-qualifier floating-point aligned vector of 4 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fvec4, aligned_fvec4, 16); + + + /// Single-qualifier floating-point aligned vector of 1 component. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32vec1, aligned_f32vec1, 4); + + /// Single-qualifier floating-point aligned vector of 2 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32vec2, aligned_f32vec2, 8); + + /// Single-qualifier floating-point aligned vector of 3 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32vec3, aligned_f32vec3, 16); + + /// Single-qualifier floating-point aligned vector of 4 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32vec4, aligned_f32vec4, 16); + + + /// Double-qualifier floating-point aligned vector of 1 component. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(dvec1, aligned_dvec1, 8); + + /// Double-qualifier floating-point aligned vector of 2 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(dvec2, aligned_dvec2, 16); + + /// Double-qualifier floating-point aligned vector of 3 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(dvec3, aligned_dvec3, 32); + + /// Double-qualifier floating-point aligned vector of 4 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(dvec4, aligned_dvec4, 32); + + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// Double-qualifier floating-point aligned vector of 1 component. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64vec1, aligned_f64vec1, 8); + + /// Double-qualifier floating-point aligned vector of 2 components. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64vec2, aligned_f64vec2, 16); + + /// Double-qualifier floating-point aligned vector of 3 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64vec3, aligned_f64vec3, 32); + + /// Double-qualifier floating-point aligned vector of 4 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64vec4, aligned_f64vec4, 32); + +# endif//GLM_FORCE_SINGLE_ONLY + + ////////////////////// + // Float matrix types + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef detail::tmat1 mat1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat2, aligned_mat2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat3, aligned_mat3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat4, aligned_mat4, 16); + + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef detail::tmat1x1 mat1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat2x2, aligned_mat2x2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat3x3, aligned_mat3x3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat4x4, aligned_mat4x4, 16); + + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef detail::tmat1x1 fmat1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat2x2, aligned_fmat2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat3x3, aligned_fmat3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat4x4, aligned_fmat4, 16); + + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef f32 fmat1x1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat2x2, aligned_fmat2x2, 16); + + /// Single-qualifier floating-point aligned 2x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat2x3, aligned_fmat2x3, 16); + + /// Single-qualifier floating-point aligned 2x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat2x4, aligned_fmat2x4, 16); + + /// Single-qualifier floating-point aligned 3x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat3x2, aligned_fmat3x2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat3x3, aligned_fmat3x3, 16); + + /// Single-qualifier floating-point aligned 3x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat3x4, aligned_fmat3x4, 16); + + /// Single-qualifier floating-point aligned 4x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat4x2, aligned_fmat4x2, 16); + + /// Single-qualifier floating-point aligned 4x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat4x3, aligned_fmat4x3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat4x4, aligned_fmat4x4, 16); + + + /// Single-qualifier floating-point aligned 1x1 matrix. 
+ /// @see gtx_type_aligned + //typedef detail::tmat1x1 f32mat1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat2x2, aligned_f32mat2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat3x3, aligned_f32mat3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat4x4, aligned_f32mat4, 16); + + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef f32 f32mat1x1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat2x2, aligned_f32mat2x2, 16); + + /// Single-qualifier floating-point aligned 2x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat2x3, aligned_f32mat2x3, 16); + + /// Single-qualifier floating-point aligned 2x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat2x4, aligned_f32mat2x4, 16); + + /// Single-qualifier floating-point aligned 3x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat3x2, aligned_f32mat3x2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat3x3, aligned_f32mat3x3, 16); + + /// Single-qualifier floating-point aligned 3x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat3x4, aligned_f32mat3x4, 16); + + /// Single-qualifier floating-point aligned 4x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat4x2, aligned_f32mat4x2, 16); + + /// Single-qualifier floating-point aligned 4x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat4x3, aligned_f32mat4x3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat4x4, aligned_f32mat4x4, 16); + + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// Double-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef detail::tmat1x1 f64mat1; + + /// Double-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat2x2, aligned_f64mat2, 32); + + /// Double-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat3x3, aligned_f64mat3, 32); + + /// Double-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat4x4, aligned_f64mat4, 32); + + + /// Double-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef f64 f64mat1x1; + + /// Double-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat2x2, aligned_f64mat2x2, 32); + + /// Double-qualifier floating-point aligned 2x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat2x3, aligned_f64mat2x3, 32); + + /// Double-qualifier floating-point aligned 2x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat2x4, aligned_f64mat2x4, 32); + + /// Double-qualifier floating-point aligned 3x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat3x2, aligned_f64mat3x2, 32); + + /// Double-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat3x3, aligned_f64mat3x3, 32); + + /// Double-qualifier floating-point aligned 3x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat3x4, aligned_f64mat3x4, 32); + + /// Double-qualifier floating-point aligned 4x2 matrix. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat4x2, aligned_f64mat4x2, 32); + + /// Double-qualifier floating-point aligned 4x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat4x3, aligned_f64mat4x3, 32); + + /// Double-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat4x4, aligned_f64mat4x4, 32); + +# endif//GLM_FORCE_SINGLE_ONLY + + + ////////////////////////// + // Quaternion types + + /// Single-qualifier floating-point aligned quaternion. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(quat, aligned_quat, 16); + + /// Single-qualifier floating-point aligned quaternion. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(quat, aligned_fquat, 16); + + /// Double-qualifier floating-point aligned quaternion. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(dquat, aligned_dquat, 32); + + /// Single-qualifier floating-point aligned quaternion. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32quat, aligned_f32quat, 16); + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// Double-qualifier floating-point aligned quaternion. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64quat, aligned_f64quat, 32); + +# endif//GLM_FORCE_SINGLE_ONLY + + /// @} +}//namespace glm + +#include "type_aligned.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/type_aligned.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/type_aligned.inl new file mode 100644 index 000000000000..54c1b818b64a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/type_aligned.inl @@ -0,0 +1,6 @@ +/// @ref gtc_type_aligned + +namespace glm +{ + +} diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/type_trait.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/type_trait.hpp new file mode 100644 index 000000000000..56685c8cb98c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/type_trait.hpp @@ -0,0 +1,85 @@ +/// @ref gtx_type_trait +/// @file glm/gtx/type_trait.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_type_trait GLM_GTX_type_trait +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Defines traits for each type. + +#pragma once + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_type_trait is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_type_trait extension included")
+#	endif
+#endif
+
+// Dependency:
+#include "../detail/qualifier.hpp"
+#include "../gtc/quaternion.hpp"
+#include "../gtx/dual_quaternion.hpp"
+
+namespace glm
+{
+	/// @addtogroup gtx_type_trait
+	/// @{
+
+	template<typename T>
+	struct type
+	{
+		static bool const is_vec = false;
+		static bool const is_mat = false;
+		static bool const is_quat = false;
+		static length_t const components = 0;
+		static length_t const cols = 0;
+		static length_t const rows = 0;
+	};
+
+	template<length_t L, typename T, qualifier Q>
+	struct type<vec<L, T, Q> >
+	{
+		static bool const is_vec = true;
+		static bool const is_mat = false;
+		static bool const is_quat = false;
+		static length_t const components = L;
+	};
+
+	template<length_t C, length_t R, typename T, qualifier Q>
+	struct type<mat<C, R, T, Q> >
+	{
+		static bool const is_vec = false;
+		static bool const is_mat = true;
+		static bool const is_quat = false;
+		static length_t const components = C;
+		static length_t const cols = C;
+		static length_t const rows = R;
+	};
+
+	template<typename T, qualifier Q>
+	struct type<qua<T, Q> >
+	{
+		static bool const is_vec = false;
+		static bool const is_mat = false;
+		static bool const is_quat = true;
+		static length_t const components = 4;
+	};
+
+	template<typename T, qualifier Q>
+	struct type<tdualquat<T, Q> >
+	{
+		static bool const is_vec = false;
+		static bool const is_mat = false;
+		static bool const is_quat = true;
+		static length_t const components = 8;
+	};
+
+	/// @}
+}//namespace glm
+
+#include "type_trait.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/type_trait.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/type_trait.inl
new file mode 100644
index 000000000000..045de959cc21
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/type_trait.inl
@@ -0,0 +1,61 @@
+/// @ref gtx_type_trait
+
+namespace glm
+{
+	template<typename T>
+	bool const type<T>::is_vec;
+	template<typename T>
+	bool const type<T>::is_mat;
+	template<typename T>
+	bool const type<T>::is_quat;
+	template<typename T>
+	length_t const type<T>::components;
+	template<typename T>
+	length_t const type<T>::cols;
+	template<typename T>
+	length_t const type<T>::rows;
+
+	// vec
+	template<length_t L, typename T, qualifier Q>
+	bool const type<vec<L, T, Q> >::is_vec;
+	template<length_t L, typename T, qualifier Q>
+	bool const type<vec<L, T, Q> >::is_mat;
+	template<length_t L, typename T, qualifier Q>
+	bool const type<vec<L, T, Q> >::is_quat;
+	template<length_t L, typename T, qualifier Q>
+	length_t const type<vec<L, T, Q> >::components;
+
+	// mat
+	template<length_t C, length_t R, typename T, qualifier Q>
+	bool const type<mat<C, R, T, Q> >::is_vec;
+	template<length_t C, length_t R, typename T, qualifier Q>
+	bool const type<mat<C, R, T, Q> >::is_mat;
+	template<length_t C, length_t R, typename T, qualifier Q>
+	bool const type<mat<C, R, T, Q> >::is_quat;
+	template<length_t C, length_t R, typename T, qualifier Q>
+	length_t const type<mat<C, R, T, Q> >::components;
+	template<length_t C, length_t R, typename T, qualifier Q>
+	length_t const type<mat<C, R, T, Q> >::cols;
+	template<length_t C, length_t R, typename T, qualifier Q>
+	length_t const type<mat<C, R, T, Q> >::rows;
+
+	// tquat
+	template<typename T, qualifier Q>
+	bool const type<qua<T, Q> >::is_vec;
+	template<typename T, qualifier Q>
+	bool const type<qua<T, Q> >::is_mat;
+	template<typename T, qualifier Q>
+	bool const type<qua<T, Q> >::is_quat;
+	template<typename T, qualifier Q>
+	length_t const type<qua<T, Q> >::components;
+
+	// tdualquat
+	template<typename T, qualifier Q>
+	bool const type<tdualquat<T, Q> >::is_vec;
+	template<typename T, qualifier Q>
+	bool const type<tdualquat<T, Q> >::is_mat;
+	template<typename T, qualifier Q>
+	bool const type<tdualquat<T, Q> >::is_quat;
+	template<typename T, qualifier Q>
+	length_t const type<tdualquat<T, Q> >::components;
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/vec_swizzle.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/vec_swizzle.hpp
new file mode 100644
index 000000000000..4440c9accd54
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/vec_swizzle.hpp
@@ -0,0 +1,2786 @@
+/// @ref gtx_vec_swizzle
+/// @file glm/gtx/vec_swizzle.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_vec_swizzle GLM_GTX_vec_swizzle
+/// @ingroup gtx
+///
+/// Include to use the features of this extension.
+///
+/// Functions to perform swizzle operation.
+ +#pragma once + +#include "../glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_vec_swizzle is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_vec_swizzle extension included") +# endif +#endif + +namespace glm { + /// @addtogroup gtx_vec_swizzle + /// @{ + + // xx + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xx(const glm::vec<1, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xx(const glm::vec<2, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xx(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xx(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.x); + } + + // xy + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xy(const glm::vec<2, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xy(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xy(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.y); + } + + // xz + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xz(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xz(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.z); + } + + // xw + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xw(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.w); + } + + // yx + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yx(const glm::vec<2, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yx(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yx(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.x); + } + + // yy + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yy(const glm::vec<2, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yy(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yy(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.y); + } + + // yz + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yz(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yz(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.z); + } + + // yw + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yw(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.w); + } + + // zx + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zx(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zx(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.x); + } + + // zy + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zy(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zy(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.y); + } + + // zz + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zz(const 
glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zz(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.z); + } + + // zw + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zw(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.w); + } + + // wx + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> wx(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.w, v.x); + } + + // wy + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> wy(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.w, v.y); + } + + // wz + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> wz(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.w, v.z); + } + + // ww + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> ww(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.w, v.w); + } + + // xxx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxx(const glm::vec<1, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxx(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.x); + } + + // xxy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxy(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.y); + } + + // xxz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.z); + } + + // xxw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.w); + } + + // xyx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyx(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.x); + } + + // xyy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyy(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.y); + } + + // xyz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.z); + } + + // xyw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.w); + } + + // xzx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzx(const glm::vec<3, T, 
Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.x); + } + + // xzy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.y); + } + + // xzz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.z); + } + + // xzw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.w); + } + + // xwx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xwx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.w, v.x); + } + + // xwy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xwy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.w, v.y); + } + + // xwz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xwz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.w, v.z); + } + + // xww + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xww(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.w, v.w); + } + + // yxx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxx(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.x); + } + + // yxy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxy(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.y); + } + + // yxz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.z); + } + + // yxw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.w); + } + + // yyx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyx(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.x); + } + + // yyy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyy(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.y); + } + + // yyz + template + GLM_FUNC_QUALIFIER glm::vec<3, 
T, Q> yyz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.z); + } + + // yyw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.w); + } + + // yzx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.x); + } + + // yzy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.y); + } + + // yzz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.z); + } + + // yzw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.w); + } + + // ywx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> ywx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.w, v.x); + } + + // ywy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> ywy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.w, v.y); + } + + // ywz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> ywz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.w, v.z); + } + + // yww + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yww(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.w, v.w); + } + + // zxx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.x); + } + + // zxy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.y); + } + + // zxz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.z); + } + + // zxw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.w); + } + + // zyx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.x); + } + + // zyy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.y); + } + + // zyz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.z); + 
} + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.z); + } + + // zyw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.w); + } + + // zzx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.x); + } + + // zzy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.y); + } + + // zzz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.z); + } + + // zzw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.w); + } + + // zwx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zwx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.w, v.x); + } + + // zwy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zwy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.w, v.y); + } + + // zwz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zwz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.w, v.z); + } + + // zww + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zww(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.w, v.w); + } + + // wxx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wxx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.x, v.x); + } + + // wxy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wxy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.x, v.y); + } + + // wxz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wxz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.x, v.z); + } + + // wxw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wxw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.x, v.w); + } + + // wyx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wyx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.y, v.x); + } + + // wyy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wyy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.y, v.y); + } + + // wyz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wyz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.y, v.z); + } + + // wyw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wyw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.y, v.w); + } + + // wzx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wzx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.z, v.x); + } + + // wzy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wzy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.z, v.y); + } + + // wzz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wzz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.z, v.z); + } + + // wzw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wzw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.z, v.w); + } + + // wwx + template + 
GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wwx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.w, v.x); + } + + // wwy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wwy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.w, v.y); + } + + // wwz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wwz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.w, v.z); + } + + // www + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> www(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.w, v.w); + } + + // xxxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxx(const glm::vec<1, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); + } + + // xxxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y); + } + + // xxxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.z); + } + + // xxxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.w); + } + + // xxyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); + } + + // xxyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); + } + + // xxyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.z); + } + + // xxyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.w); + } + + // xxzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.x); + } + 
+ // xxzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.y); + } + + // xxzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.z); + } + + // xxzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.w); + } + + // xxwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.w, v.x); + } + + // xxwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.w, v.y); + } + + // xxwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.w, v.z); + } + + // xxww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.w, v.w); + } + + // xyxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x); + } + + // xyxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); + } + + // xyxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.z); + } + + // xyxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.w); + } + + // xyyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); + } + + // xyyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); + } + + // xyyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyz(const glm::vec<3, T, Q> 
&v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.z); + } + + // xyyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.w); + } + + // xyzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.x); + } + + // xyzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.y); + } + + // xyzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.z); + } + + // xyzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.w); + } + + // xywx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xywx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.w, v.x); + } + + // xywy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xywy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.w, v.y); + } + + // xywz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xywz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.w, v.z); + } + + // xyww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.w, v.w); + } + + // xzxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.x); + } + + // xzxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.y); + } + + // xzxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.z); + } + + // xzxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.w); + } + + // xzyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.x); + } + + // xzyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.y); + } + + // 
xzyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.z); + } + + // xzyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.w); + } + + // xzzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.x); + } + + // xzzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.y); + } + + // xzzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.z); + } + + // xzzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.w); + } + + // xzwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.w, v.x); + } + + // xzwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.w, v.y); + } + + // xzwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.w, v.z); + } + + // xzww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.w, v.w); + } + + // xwxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.x, v.x); + } + + // xwxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.x, v.y); + } + + // xwxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.x, v.z); + } + + // xwxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.x, v.w); + } + + // xwyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.y, v.x); + } + + // xwyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.y, v.y); + } + + // xwyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.y, v.z); + } + + // xwyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.y, v.w); + } + + // xwzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.z, v.x); + } + + // xwzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.z, v.y); + } + + // xwzz + template + 
GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.z, v.z); + } + + // xwzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.z, v.w); + } + + // xwwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.w, v.x); + } + + // xwwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.w, v.y); + } + + // xwwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.w, v.z); + } + + // xwww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.w, v.w); + } + + // yxxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); + } + + // yxxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); + } + + // yxxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.z); + } + + // yxxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.w); + } + + // yxyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); + } + + // yxyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); + } + + // yxyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.z); + } + + // yxyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.w); + } + + // yxzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzx(const glm::vec<3, T, Q> &v) { + 
return glm::vec<4, T, Q>(v.y, v.x, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.x); + } + + // yxzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.y); + } + + // yxzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.z); + } + + // yxzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.w); + } + + // yxwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.w, v.x); + } + + // yxwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.w, v.y); + } + + // yxwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.w, v.z); + } + + // yxww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.w, v.w); + } + + // yyxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); + } + + // yyxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); + } + + // yyxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.z); + } + + // yyxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.w); + } + + // yyyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); + } + + // yyyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER 
glm::vec<4, T, Q> yyyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); + } + + // yyyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.z); + } + + // yyyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.w); + } + + // yyzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.x); + } + + // yyzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.y); + } + + // yyzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.z); + } + + // yyzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.w); + } + + // yywx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yywx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.w, v.x); + } + + // yywy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yywy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.w, v.y); + } + + // yywz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yywz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.w, v.z); + } + + // yyww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.w, v.w); + } + + // yzxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.x); + } + + // yzxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.y); + } + + // yzxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.z); + } + + // yzxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.w); + } + + // yzyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.x); + } + + // yzyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyy(const glm::vec<3, T, Q> &v) { + 
return glm::vec<4, T, Q>(v.y, v.z, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.y); + } + + // yzyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.z); + } + + // yzyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.w); + } + + // yzzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.x); + } + + // yzzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.y); + } + + // yzzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.z); + } + + // yzzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.w); + } + + // yzwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.w, v.x); + } + + // yzwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.w, v.y); + } + + // yzwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.w, v.z); + } + + // yzww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.w, v.w); + } + + // ywxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.x, v.x); + } + + // ywxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.x, v.y); + } + + // ywxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.x, v.z); + } + + // ywxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.x, v.w); + } + + // ywyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.y, v.x); + } + + // ywyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.y, v.y); + } + + // ywyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.y, v.z); + } + + // ywyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.y, v.w); + } + + // ywzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, 
Q>(v.y, v.w, v.z, v.x); + } + + // ywzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.z, v.y); + } + + // ywzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.z, v.z); + } + + // ywzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.z, v.w); + } + + // ywwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.w, v.x); + } + + // ywwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.w, v.y); + } + + // ywwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.w, v.z); + } + + // ywww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.w, v.w); + } + + // zxxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.x); + } + + // zxxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.y); + } + + // zxxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.z); + } + + // zxxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.w); + } + + // zxyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.x); + } + + // zxyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.y); + } + + // zxyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.z); + } + + // zxyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.w); + } + + // zxzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.x); + } + + // zxzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.y); + } + + template + 
GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.y); + } + + // zxzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.z); + } + + // zxzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.w); + } + + // zxwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.w, v.x); + } + + // zxwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.w, v.y); + } + + // zxwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.w, v.z); + } + + // zxww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.w, v.w); + } + + // zyxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.x); + } + + // zyxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.y); + } + + // zyxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.z); + } + + // zyxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.w); + } + + // zyyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.x); + } + + // zyyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.y); + } + + // zyyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.z); + } + + // zyyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.w); + } + + // zyzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.x); + } + + // zyzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzy(const 
glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.y); + } + + // zyzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.z); + } + + // zyzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.w); + } + + // zywx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zywx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.w, v.x); + } + + // zywy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zywy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.w, v.y); + } + + // zywz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zywz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.w, v.z); + } + + // zyww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.w, v.w); + } + + // zzxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.x); + } + + // zzxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.y); + } + + // zzxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.z); + } + + // zzxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.w); + } + + // zzyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.x); + } + + // zzyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.y); + } + + // zzyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.z); + } + + // zzyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.w); + } + + // zzzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, 
v.x); + } + + // zzzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.y); + } + + // zzzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.z); + } + + // zzzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.w); + } + + // zzwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.w, v.x); + } + + // zzwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.w, v.y); + } + + // zzwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.w, v.z); + } + + // zzww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.w, v.w); + } + + // zwxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.x, v.x); + } + + // zwxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.x, v.y); + } + + // zwxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.x, v.z); + } + + // zwxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.x, v.w); + } + + // zwyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.y, v.x); + } + + // zwyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.y, v.y); + } + + // zwyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.y, v.z); + } + + // zwyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.y, v.w); + } + + // zwzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.z, v.x); + } + + // zwzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.z, v.y); + } + + // zwzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.z, v.z); + } + + // zwzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.z, v.w); + } + + // zwwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.w, v.x); + } + + // zwwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.w, v.y); + } + + // zwwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, 
v.w, v.z); + } + + // zwww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.w, v.w); + } + + // wxxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.x, v.x); + } + + // wxxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.x, v.y); + } + + // wxxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.x, v.z); + } + + // wxxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.x, v.w); + } + + // wxyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.y, v.x); + } + + // wxyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.y, v.y); + } + + // wxyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.y, v.z); + } + + // wxyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.y, v.w); + } + + // wxzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.z, v.x); + } + + // wxzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.z, v.y); + } + + // wxzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.z, v.z); + } + + // wxzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.z, v.w); + } + + // wxwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.w, v.x); + } + + // wxwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.w, v.y); + } + + // wxwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.w, v.z); + } + + // wxww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.w, v.w); + } + + // wyxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.x, v.x); + } + + // wyxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.x, v.y); + } + + // wyxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.x, v.z); + } + + // wyxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.x, v.w); + } + + // wyyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.y, v.x); + } + + // wyyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.y, v.y); + } + + // wyyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyyz(const glm::vec<4, T, Q> &v) { + return 
glm::vec<4, T, Q>(v.w, v.y, v.y, v.z); + } + + // wyyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.y, v.w); + } + + // wyzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.z, v.x); + } + + // wyzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.z, v.y); + } + + // wyzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.z, v.z); + } + + // wyzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.z, v.w); + } + + // wywx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wywx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.w, v.x); + } + + // wywy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wywy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.w, v.y); + } + + // wywz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wywz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.w, v.z); + } + + // wyww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.w, v.w); + } + + // wzxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.x, v.x); + } + + // wzxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.x, v.y); + } + + // wzxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.x, v.z); + } + + // wzxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.x, v.w); + } + + // wzyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.y, v.x); + } + + // wzyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.y, v.y); + } + + // wzyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.y, v.z); + } + + // wzyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.y, v.w); + } + + // wzzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.z, v.x); + } + + // wzzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.z, v.y); + } + + // wzzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.z, v.z); + } + + // wzzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.z, v.w); + } + + // wzwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.w, v.x); + } + + // wzwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.w, v.y); + } + + // wzwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzwz(const glm::vec<4, 
T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.w, v.z); + } + + // wzww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.w, v.w); + } + + // wwxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.x, v.x); + } + + // wwxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.x, v.y); + } + + // wwxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.x, v.z); + } + + // wwxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.x, v.w); + } + + // wwyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.y, v.x); + } + + // wwyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.y, v.y); + } + + // wwyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.y, v.z); + } + + // wwyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.y, v.w); + } + + // wwzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.z, v.x); + } + + // wwzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.z, v.y); + } + + // wwzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.z, v.z); + } + + // wwzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.z, v.w); + } + + // wwwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.w, v.x); + } + + // wwwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.w, v.y); + } + + // wwwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.w, v.z); + } + + // wwww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.w, v.w); + } + + /// @} +}//namespace glm diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_angle.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_angle.hpp new file mode 100644 index 000000000000..9ae437126b19 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_angle.hpp @@ -0,0 +1,57 @@ +/// @ref gtx_vector_angle +/// @file glm/gtx/vector_angle.hpp +/// +/// @see core (dependence) +/// @see gtx_quaternion (dependence) +/// @see gtx_epsilon (dependence) +/// +/// @defgroup gtx_vector_angle GLM_GTX_vector_angle +/// @ingroup gtx +/// +/// Include to use the features of this extension. 
+///
+/// Compute angle between vectors
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/epsilon.hpp"
+#include "../gtx/quaternion.hpp"
+#include "../gtx/rotate_vector.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_vector_angle is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_vector_angle extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_vector_angle
+	/// @{
+
+	//! Returns the absolute angle between two vectors.
+	//! Parameters need to be normalized.
+	/// @see gtx_vector_angle extension.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL T angle(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
+	//! Returns the oriented angle between two 2d vectors.
+	//! Parameters need to be normalized.
+	/// @see gtx_vector_angle extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL T orientedAngle(vec<2, T, Q> const& x, vec<2, T, Q> const& y);
+
+	//! Returns the oriented angle between two 3d vectors based from a reference axis.
+	//! Parameters need to be normalized.
+	/// @see gtx_vector_angle extension.
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL T orientedAngle(vec<3, T, Q> const& x, vec<3, T, Q> const& y, vec<3, T, Q> const& ref);
+
+	/// @}
+}// namespace glm
+
+#include "vector_angle.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_angle.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_angle.inl
new file mode 100644
index 000000000000..11e1a218372e
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_angle.inl
@@ -0,0 +1,45 @@
+/// @ref gtx_vector_angle
+
+namespace glm
+{
+	template<typename genType>
+	GLM_FUNC_QUALIFIER genType angle
+	(
+		genType const& x,
+		genType const& y
+	)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'angle' only accept floating-point inputs");
+		return acos(clamp(dot(x, y), genType(-1), genType(1)));
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER T angle(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'angle' only accept floating-point inputs");
+		return acos(clamp(dot(x, y), T(-1), T(1)));
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER T orientedAngle(vec<2, T, Q> const& x, vec<2, T, Q> const& y)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'orientedAngle' only accept floating-point inputs");
+		T const Angle(acos(clamp(dot(x, y), T(-1), T(1))));
+
+		T const partialCross = x.x * y.y - y.x * x.y;
+
+		if (partialCross > T(0))
+			return Angle;
+		else
+			return -Angle;
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER T orientedAngle(vec<3, T, Q> const& x, vec<3, T, Q> const& y, vec<3, T, Q> const& ref)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'orientedAngle' only accept floating-point inputs");
+
+		T const Angle(acos(clamp(dot(x, y), T(-1), T(1))));
+		return mix(Angle, -Angle, dot(ref, cross(x, y)) < T(0));
+	}
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_query.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_query.hpp
new file mode 100644
index 000000000000..af1f7b9bd584
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_query.hpp
@@ -0,0 +1,66 @@
+/// @ref gtx_vector_query
+/// @file glm/gtx/vector_query.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_vector_query GLM_GTX_vector_query
+/// @ingroup gtx
+///
+/// Include <glm/gtx/vector_query.hpp> to use the features of this extension.
+///
+/// Query information of vector types
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include <cfloat>
+#include <limits>
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_vector_query is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_vector_query extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_vector_query
+	/// @{
+
+	//! Check whether two vectors are collinears.
+	/// @see gtx_vector_query extensions.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL bool areCollinear(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon);
+
+	//! Check whether two vectors are orthogonals.
+	/// @see gtx_vector_query extensions.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL bool areOrthogonal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon);
+
+	//! Check whether a vector is normalized.
+	/// @see gtx_vector_query extensions.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL bool isNormalized(vec<L, T, Q> const& v, T const& epsilon);
+
+	//! Check whether a vector is null.
+	/// @see gtx_vector_query extensions.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL bool isNull(vec<L, T, Q> const& v, T const& epsilon);
+
+	//! Check whether a each component of a vector is null.
+	/// @see gtx_vector_query extensions.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, bool, Q> isCompNull(vec<L, T, Q> const& v, T const& epsilon);
+
+	//! Check whether two vectors are orthonormal.
+	/// @see gtx_vector_query extensions.
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL bool areOrthonormal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon);
+
+	/// @}
+}// namespace glm
+
+#include "vector_query.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_query.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_query.inl
new file mode 100644
index 000000000000..d1a5c9be46b1
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/vector_query.inl
@@ -0,0 +1,154 @@
+/// @ref gtx_vector_query
+
+#include <cassert>
+
+namespace glm{
+namespace detail
+{
+	template<length_t L, typename T, qualifier Q>
+	struct compute_areCollinear{};
+
+	template<typename T, qualifier Q>
+	struct compute_areCollinear<2, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static bool call(vec<2, T, Q> const& v0, vec<2, T, Q> const& v1, T const& epsilon)
+		{
+			return length(cross(vec<3, T, Q>(v0, static_cast<T>(0)), vec<3, T, Q>(v1, static_cast<T>(0)))) < epsilon;
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_areCollinear<3, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static bool call(vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, T const& epsilon)
+		{
+			return length(cross(v0, v1)) < epsilon;
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_areCollinear<4, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static bool call(vec<4, T, Q> const& v0, vec<4, T, Q> const& v1, T const& epsilon)
+		{
+			return length(cross(vec<3, T, Q>(v0), vec<3, T, Q>(v1))) < epsilon;
+		}
+	};
+
+	template<length_t L, typename T, qualifier Q>
+	struct compute_isCompNull{};
+
+	template<typename T, qualifier Q>
+	struct compute_isCompNull<2, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<2, bool, Q> call(vec<2, T, Q> const& v, T const& epsilon)
+		{
+			return vec<2, bool, Q>(
+				(abs(v.x) < epsilon),
+				(abs(v.y) < epsilon));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_isCompNull<3, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<3, bool, Q> call(vec<3, T, Q> const& v, T const& epsilon)
+		{
+			return vec<3, bool, Q>(
+				(abs(v.x) < epsilon),
+				(abs(v.y) < epsilon),
+				(abs(v.z) < epsilon));
+		}
+	};
+
+	template<typename T, qualifier Q>
+	struct compute_isCompNull<4, T, Q>
+	{
+		GLM_FUNC_QUALIFIER static vec<4, bool, Q> call(vec<4, T, Q> const& v, T const& epsilon)
+		{
+			return vec<4, bool, Q>(
+				(abs(v.x) < epsilon),
+				(abs(v.y) < epsilon),
+				(abs(v.z) < epsilon),
+				(abs(v.w) < epsilon));
+		}
+	};
+
+}//namespace detail
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER bool areCollinear(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'areCollinear' only accept floating-point inputs");
+
+		return detail::compute_areCollinear<L, T, Q>::call(v0, v1, epsilon);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER bool areOrthogonal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'areOrthogonal' only accept floating-point inputs");
+
+		return abs(dot(v0, v1)) <= max(
+			static_cast<T>(1),
+			length(v0)) * max(static_cast<T>(1), length(v1)) * epsilon;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER bool isNormalized(vec<L, T, Q> const& v, T const& epsilon)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isNormalized' only accept floating-point inputs");
+
+		return abs(length(v) - static_cast<T>(1)) <= static_cast<T>(2) * epsilon;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER bool isNull(vec<L, T, Q> const& v, T const& epsilon)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isNull' only accept floating-point inputs");
+
+		return length(v) <= epsilon;
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<L, bool, Q> isCompNull(vec<L, T, Q> const& v, T const& epsilon)
+	{
+		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isCompNull' only accept floating-point inputs");
+
+		return detail::compute_isCompNull<L, T, Q>::call(v, epsilon);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<2, bool, Q> isCompNull(vec<2, T, Q> const& v, T const& epsilon)
+	{
+		return vec<2, bool, Q>(
+			abs(v.x) < epsilon,
+			abs(v.y) < epsilon);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<3, bool, Q> isCompNull(vec<3, T, Q> const& v, T const& epsilon)
+	{
+		return vec<3, bool, Q>(
+			abs(v.x) < epsilon,
+			abs(v.y) < epsilon,
+			abs(v.z) < epsilon);
+	}
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER vec<4, bool, Q> isCompNull(vec<4, T, Q> const& v, T const& epsilon)
+	{
+		return vec<4, bool, Q>(
+			abs(v.x) < epsilon,
+			abs(v.y) < epsilon,
+			abs(v.z) < epsilon,
+			abs(v.w) < epsilon);
+	}
+
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_QUALIFIER bool areOrthonormal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon)
+	{
+		return isNormalized(v0, epsilon) && isNormalized(v1, epsilon) && (abs(dot(v0, v1)) <= epsilon);
+	}
+
+}//namespace glm
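The two GTX extensions above are small header-only helpers. To make their conventions concrete, here is a minimal usage sketch; it is illustrative only, not part of the vendored sources, and assumes a consumer that defines GLM_ENABLE_EXPERIMENTAL before including the gtx headers:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/vector_angle.hpp>
    #include <glm/gtx/vector_query.hpp>

    float demo()
    {
        glm::vec2 const a = glm::normalize(glm::vec2(1.0f, 0.0f));
        glm::vec2 const b = glm::normalize(glm::vec2(0.0f, 1.0f));
        // angle() expects normalized inputs and returns the unsigned angle in radians.
        float const unsigned_angle = glm::angle(a, b);        // pi/2
        // orientedAngle() is signed for 2d vectors: positive when b lies counter-clockwise of a.
        float const signed_angle = glm::orientedAngle(a, b);  // +pi/2
        // The vector_query predicates all take an explicit epsilon tolerance.
        bool const parallel = glm::areCollinear(a, glm::vec2(2.0f, 0.0f), 1e-6f); // true: same direction
        bool const unit = glm::isNormalized(b, 1e-6f);                            // true
        return unsigned_angle + signed_angle + ((parallel && unit) ? 1.0f : 0.0f);
    }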
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/wrap.hpp b/thirdparty/manifold/thirdparty/glm/glm/gtx/wrap.hpp
new file mode 100644
index 000000000000..ad4eb3fca740
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/wrap.hpp
@@ -0,0 +1,37 @@
+/// @ref gtx_wrap
+/// @file glm/gtx/wrap.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_wrap GLM_GTX_wrap
+/// @ingroup gtx
+///
+/// Include <glm/gtx/wrap.hpp> to use the features of this extension.
+///
+/// Wrapping mode of texture coordinates.
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../ext/scalar_common.hpp"
+#include "../ext/vector_common.hpp"
+#include "../gtc/vec1.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	ifndef GLM_ENABLE_EXPERIMENTAL
+#		pragma message("GLM: GLM_GTX_wrap is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#	else
+#		pragma message("GLM: GLM_GTX_wrap extension included")
+#	endif
+#endif
+
+namespace glm
+{
+	/// @addtogroup gtx_wrap
+	/// @{
+
+	/// @}
+}// namespace glm
+
+#include "wrap.inl"
diff --git a/thirdparty/manifold/thirdparty/glm/glm/gtx/wrap.inl b/thirdparty/manifold/thirdparty/glm/glm/gtx/wrap.inl
new file mode 100644
index 000000000000..4be3b4c38aee
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/gtx/wrap.inl
@@ -0,0 +1,6 @@
+/// @ref gtx_wrap
+
+namespace glm
+{
+
+}//namespace glm
diff --git a/thirdparty/manifold/thirdparty/glm/glm/integer.hpp b/thirdparty/manifold/thirdparty/glm/glm/integer.hpp
new file mode 100644
index 000000000000..8817db3f0a22
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/glm/glm/integer.hpp
@@ -0,0 +1,212 @@
+/// @ref core
+/// @file glm/integer.hpp
+///
+/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+///
+/// @defgroup core_func_integer Integer functions
+/// @ingroup core
+///
+/// Provides GLSL functions on integer types
+///
+/// These all operate component-wise. The description is per component.
+/// The notation [a, b] means the set of bits from bit-number a through bit-number
+/// b, inclusive. The lowest-order bit is bit 0.
+///
+/// Include <glm/integer.hpp> to use these core features.
+
+#pragma once
+
+#include "detail/qualifier.hpp"
+#include "common.hpp"
+#include "vector_relational.hpp"
+
+namespace glm
+{
+	/// @addtogroup core_func_integer
+	/// @{
+
+	/// Adds 32-bit unsigned integer x and y, returning the sum
+	/// modulo pow(2, 32). The value carry is set to 0 if the sum was
+	/// less than pow(2, 32), or to 1 otherwise.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	///
+	/// @see GLSL uaddCarry man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL vec<L, uint, Q> uaddCarry(
+		vec<L, uint, Q> const& x,
+		vec<L, uint, Q> const& y,
+		vec<L, uint, Q> & carry);
+
+	/// Subtracts the 32-bit unsigned integer y from x, returning
+	/// the difference if non-negative, or pow(2, 32) plus the difference
+	/// otherwise. The value borrow is set to 0 if x >= y, or to 1 otherwise.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	///
+	/// @see GLSL usubBorrow man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL vec<L, uint, Q> usubBorrow(
+		vec<L, uint, Q> const& x,
+		vec<L, uint, Q> const& y,
+		vec<L, uint, Q> & borrow);
+
+	/// Multiplies 32-bit integers x and y, producing a 64-bit
+	/// result. The 32 least-significant bits are returned in lsb.
+	/// The 32 most-significant bits are returned in msb.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	///
+	/// @see GLSL umulExtended man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL void umulExtended(
+		vec<L, uint, Q> const& x,
+		vec<L, uint, Q> const& y,
+		vec<L, uint, Q> & msb,
+		vec<L, uint, Q> & lsb);
+
+	/// Multiplies 32-bit integers x and y, producing a 64-bit
+	/// result. The 32 least-significant bits are returned in lsb.
+	/// The 32 most-significant bits are returned in msb.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	///
+	/// @see GLSL imulExtended man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<length_t L, qualifier Q>
+	GLM_FUNC_DECL void imulExtended(
+		vec<L, int, Q> const& x,
+		vec<L, int, Q> const& y,
+		vec<L, int, Q> & msb,
+		vec<L, int, Q> & lsb);
+
+	/// Extracts bits [offset, offset + bits - 1] from value,
+	/// returning them in the least significant bits of the result.
+	/// For unsigned data types, the most significant bits of the
+	/// result will be set to zero. For signed data types, the
+	/// most significant bits will be set to the value of bit offset + base - 1.
+	///
+	/// If bits is zero, the result will be zero. The result will be
+	/// undefined if offset or bits is negative, or if the sum of
+	/// offset and bits is greater than the number of bits used
+	/// to store the operand.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Signed or unsigned integer scalar types.
+	///
+	/// @see GLSL bitfieldExtract man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> bitfieldExtract(
+		vec<L, T, Q> const& Value,
+		int Offset,
+		int Bits);
+
+	/// Returns the insertion the bits least-significant bits of insert into base.
+	///
+	/// The result will have bits [offset, offset + bits - 1] taken
+	/// from bits [0, bits - 1] of insert, and all other bits taken
+	/// directly from the corresponding bits of base. If bits is
+	/// zero, the result will simply be base. The result will be
+	/// undefined if offset or bits is negative, or if the sum of
+	/// offset and bits is greater than the number of bits used to
+	/// store the operand.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Signed or unsigned integer scalar or vector types.
+	///
+	/// @see GLSL bitfieldInsert man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> bitfieldInsert(
+		vec<L, T, Q> const& Base,
+		vec<L, T, Q> const& Insert,
+		int Offset,
+		int Bits);
+
+	/// Returns the reversal of the bits of value.
+	/// The bit numbered n of the result will be taken from bit (bits - 1) - n of value,
+	/// where bits is the total number of bits used to represent value.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Signed or unsigned integer scalar or vector types.
+	///
+	/// @see GLSL bitfieldReverse man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, T, Q> bitfieldReverse(vec<L, T, Q> const& v);
+
+	/// Returns the number of bits set to 1 in the binary representation of value.
+	///
+	/// @tparam genType Signed or unsigned integer scalar or vector types.
+	///
+	/// @see GLSL bitCount man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<typename genType>
+	GLM_FUNC_DECL int bitCount(genType v);
+
+	/// Returns the number of bits set to 1 in the binary representation of value.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Signed or unsigned integer scalar or vector types.
+	///
+	/// @see GLSL bitCount man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, int, Q> bitCount(vec<L, T, Q> const& v);
+
+	/// Returns the bit number of the least significant bit set to
+	/// 1 in the binary representation of value.
+	/// If value is zero, -1 will be returned.
+	///
+	/// @tparam genIUType Signed or unsigned integer scalar types.
+	///
+	/// @see GLSL findLSB man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<typename genIUType>
+	GLM_FUNC_DECL int findLSB(genIUType x);
+
+	/// Returns the bit number of the least significant bit set to
+	/// 1 in the binary representation of value.
+	/// If value is zero, -1 will be returned.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Signed or unsigned integer scalar types.
+	///
+	/// @see GLSL findLSB man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, int, Q> findLSB(vec<L, T, Q> const& v);
+
+	/// Returns the bit number of the most significant bit in the binary representation of value.
+	/// For positive integers, the result will be the bit number of the most significant bit set to 1.
+	/// For negative integers, the result will be the bit number of the most significant
+	/// bit set to 0. For a value of zero or negative one, -1 will be returned.
+	///
+	/// @tparam genIUType Signed or unsigned integer scalar types.
+	///
+	/// @see GLSL findMSB man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<typename genIUType>
+	GLM_FUNC_DECL int findMSB(genIUType x);
+
+	/// Returns the bit number of the most significant bit in the binary representation of value.
+	/// For positive integers, the result will be the bit number of the most significant bit set to 1.
+	/// For negative integers, the result will be the bit number of the most significant
+	/// bit set to 0. For a value of zero or negative one, -1 will be returned.
+	///
+	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
+	/// @tparam T Signed or unsigned integer scalar types.
+	///
+	/// @see GLSL findMSB man page
+	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
+	template<length_t L, typename T, qualifier Q>
+	GLM_FUNC_DECL vec<L, int, Q> findMSB(vec<L, T, Q> const& v);
+
+	/// @}
+}//namespace glm
+
+#include "detail/func_integer.inl"
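Since glm/integer.hpp only declares these functions (the bodies live in detail/func_integer.inl), a short sketch may help make the bit conventions concrete. It is illustrative only, not part of the vendored sources:

    #include <glm/integer.hpp>
    #include <glm/vec4.hpp>

    int integer_demo()
    {
        // bitCount counts the set bits; findLSB/findMSB return a bit index, or -1 for zero.
        int const ones = glm::bitCount(0xF0u); // 4
        int const lsb = glm::findLSB(0x08u);   // 3
        int const msb = glm::findMSB(0x08u);   // 3
        // bitfieldExtract copies bits [offset, offset + bits - 1] into the low bits of the result.
        glm::uvec4 const v(0xABCD0000u);
        glm::uvec4 const field = glm::bitfieldExtract(v, 16, 8); // 0xCD in every component
        return ones + lsb + msb + int(field.x);
    }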
+#include "./ext/matrix_float4x2.hpp" +#include "./ext/matrix_float4x2_precision.hpp" + diff --git a/thirdparty/manifold/thirdparty/glm/glm/mat4x3.hpp b/thirdparty/manifold/thirdparty/glm/glm/mat4x3.hpp new file mode 100644 index 000000000000..205725abd25a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/mat4x3.hpp @@ -0,0 +1,8 @@ +/// @ref core +/// @file glm/mat4x3.hpp + +#pragma once +#include "./ext/matrix_double4x3.hpp" +#include "./ext/matrix_double4x3_precision.hpp" +#include "./ext/matrix_float4x3.hpp" +#include "./ext/matrix_float4x3_precision.hpp" diff --git a/thirdparty/manifold/thirdparty/glm/glm/mat4x4.hpp b/thirdparty/manifold/thirdparty/glm/glm/mat4x4.hpp new file mode 100644 index 000000000000..3515f7f370bf --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/mat4x4.hpp @@ -0,0 +1,9 @@ +/// @ref core +/// @file glm/mat4x4.hpp + +#pragma once +#include "./ext/matrix_double4x4.hpp" +#include "./ext/matrix_double4x4_precision.hpp" +#include "./ext/matrix_float4x4.hpp" +#include "./ext/matrix_float4x4_precision.hpp" + diff --git a/thirdparty/manifold/thirdparty/glm/glm/matrix.hpp b/thirdparty/manifold/thirdparty/glm/glm/matrix.hpp new file mode 100644 index 000000000000..4584c92c3c4e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/matrix.hpp @@ -0,0 +1,161 @@ +/// @ref core +/// @file glm/matrix.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions +/// +/// @defgroup core_func_matrix Matrix functions +/// @ingroup core +/// +/// Provides GLSL matrix functions. +/// +/// Include to use these core features. + +#pragma once + +// Dependencies +#include "detail/qualifier.hpp" +#include "detail/setup.hpp" +#include "vec2.hpp" +#include "vec3.hpp" +#include "vec4.hpp" +#include "mat2x2.hpp" +#include "mat2x3.hpp" +#include "mat2x4.hpp" +#include "mat3x2.hpp" +#include "mat3x3.hpp" +#include "mat3x4.hpp" +#include "mat4x2.hpp" +#include "mat4x3.hpp" +#include "mat4x4.hpp" + +namespace glm { +namespace detail +{ + template + struct outerProduct_trait{}; + + template + struct outerProduct_trait<2, 2, T, Q> + { + typedef mat<2, 2, T, Q> type; + }; + + template + struct outerProduct_trait<2, 3, T, Q> + { + typedef mat<3, 2, T, Q> type; + }; + + template + struct outerProduct_trait<2, 4, T, Q> + { + typedef mat<4, 2, T, Q> type; + }; + + template + struct outerProduct_trait<3, 2, T, Q> + { + typedef mat<2, 3, T, Q> type; + }; + + template + struct outerProduct_trait<3, 3, T, Q> + { + typedef mat<3, 3, T, Q> type; + }; + + template + struct outerProduct_trait<3, 4, T, Q> + { + typedef mat<4, 3, T, Q> type; + }; + + template + struct outerProduct_trait<4, 2, T, Q> + { + typedef mat<2, 4, T, Q> type; + }; + + template + struct outerProduct_trait<4, 3, T, Q> + { + typedef mat<3, 4, T, Q> type; + }; + + template + struct outerProduct_trait<4, 4, T, Q> + { + typedef mat<4, 4, T, Q> type; + }; +}//namespace detail + + /// @addtogroup core_func_matrix + /// @{ + + /// Multiply matrix x by matrix y component-wise, i.e., + /// result[i][j] is the scalar product of x[i][j] and y[i][j]. 
+ /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL matrixCompMult man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL mat matrixCompMult(mat const& x, mat const& y); + + /// Treats the first parameter c as a column vector + /// and the second parameter r as a row vector + /// and does a linear algebraic matrix multiply c * r. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL outerProduct man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL typename detail::outerProduct_trait::type outerProduct(vec const& c, vec const& r); + + /// Returns the transposed matrix of x + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL transpose man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL typename mat::transpose_type transpose(mat const& x); + + /// Return the determinant of a squared matrix. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL determinant man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL T determinant(mat const& m); + + /// Return the inverse of a squared matrix. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL inverse man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL mat inverse(mat const& m); + + /// @} +}//namespace glm + +#include "detail/func_matrix.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/packing.hpp b/thirdparty/manifold/thirdparty/glm/glm/packing.hpp new file mode 100644 index 000000000000..ca83ac1dec96 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/packing.hpp @@ -0,0 +1,173 @@ +/// @ref core +/// @file glm/packing.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions +/// @see gtc_packing +/// +/// @defgroup core_func_packing Floating-Point Pack and Unpack Functions +/// @ingroup core +/// +/// Provides GLSL functions to pack and unpack half, single and double-precision floating point values into more compact integer types. +/// +/// These functions do not operate component-wise, rather as described in each case. +/// +/// Include to use these core features. 
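Before the declarations below, a quick round-trip sketch of the packing conventions may be useful; it is illustrative only and not part of the vendored sources:

    #include <glm/packing.hpp>

    void packing_demo()
    {
        // packUnorm2x16 clamps each component to [0, 1], scales by 65535, and rounds;
        // v.x lands in the 16 least-significant bits, v.y in the 16 most-significant bits.
        glm::uint const p = glm::packUnorm2x16(glm::vec2(0.0f, 1.0f)); // 0xFFFF0000
        glm::vec2 const u = glm::unpackUnorm2x16(p);                   // (0.0, 1.0)
        // packHalf2x16 stores two IEEE 754 half-precision values in one 32-bit integer.
        glm::uint const h = glm::packHalf2x16(glm::vec2(1.0f, -2.0f));
        glm::vec2 const back = glm::unpackHalf2x16(h);                 // (1.0, -2.0), both exactly representable
        (void)u; (void)back;
    }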
+ +#pragma once + +#include "./ext/vector_uint2.hpp" +#include "./ext/vector_float2.hpp" +#include "./ext/vector_float4.hpp" + +namespace glm +{ + /// @addtogroup core_func_packing + /// @{ + + /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packUnorm2x16: round(clamp(c, 0, +1) * 65535.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see GLSL packUnorm2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint packUnorm2x16(vec2 const& v); + + /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packSnorm2x16: round(clamp(v, -1, +1) * 32767.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see GLSL packSnorm2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint packSnorm2x16(vec2 const& v); + + /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packUnorm4x8: round(clamp(c, 0, +1) * 255.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see GLSL packUnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint packUnorm4x8(vec4 const& v); + + /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packSnorm4x8: round(clamp(c, -1, +1) * 127.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see GLSL packSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint packSnorm4x8(vec4 const& v); + + /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. 
+ /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackUnorm2x16: f / 65535.0 + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see GLSL unpackUnorm2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec2 unpackUnorm2x16(uint p); + + /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackSnorm2x16: clamp(f / 32767.0, -1, +1) + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see GLSL unpackSnorm2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec2 unpackSnorm2x16(uint p); + + /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackUnorm4x8: f / 255.0 + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see GLSL unpackUnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec4 unpackUnorm4x8(uint p); + + /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackSnorm4x8: clamp(f / 127.0, -1, +1) + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see GLSL unpackSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec4 unpackSnorm4x8(uint p); + + /// Returns a double-qualifier value obtained by packing the components of v into a 64-bit value. + /// If an IEEE 754 Inf or NaN is created, it will not signal, and the resulting floating point value is unspecified. + /// Otherwise, the bit- level representation of v is preserved. + /// The first vector component specifies the 32 least significant bits; + /// the second component specifies the 32 most significant bits. 
+ /// + /// @see GLSL packDouble2x32 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL double packDouble2x32(uvec2 const& v); + + /// Returns a two-component unsigned integer vector representation of v. + /// The bit-level representation of v is preserved. + /// The first component of the vector contains the 32 least significant bits of the double; + /// the second component consists the 32 most significant bits. + /// + /// @see GLSL unpackDouble2x32 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uvec2 unpackDouble2x32(double v); + + /// Returns an unsigned integer obtained by converting the components of a two-component floating-point vector + /// to the 16-bit floating-point representation found in the OpenGL Specification, + /// and then packing these two 16- bit integers into a 32-bit unsigned integer. + /// The first vector component specifies the 16 least-significant bits of the result; + /// the second component specifies the 16 most-significant bits. + /// + /// @see GLSL packHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint packHalf2x16(vec2 const& v); + + /// Returns a two-component floating-point vector with components obtained by unpacking a 32-bit unsigned integer into a pair of 16-bit values, + /// interpreting those values as 16-bit floating-point numbers according to the OpenGL Specification, + /// and converting them to 32-bit floating-point values. + /// The first component of the vector is obtained from the 16 least-significant bits of v; + /// the second component is obtained from the 16 most-significant bits of v. + /// + /// @see GLSL unpackHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec2 unpackHalf2x16(uint v); + + /// @} +}//namespace glm + +#include "detail/func_packing.inl" diff --git a/thirdparty/manifold/thirdparty/glm/glm/simd/common.h b/thirdparty/manifold/thirdparty/glm/glm/simd/common.h new file mode 100644 index 000000000000..9b017cb4256e --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/simd/common.h @@ -0,0 +1,240 @@ +/// @ref simd +/// @file glm/simd/common.h + +#pragma once + +#include "platform.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_add(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_add_ps(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_add(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_add_ss(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sub(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_sub_ps(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sub(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_sub_ss(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_mul(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_mul_ps(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_mul(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_mul_ss(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_div_ps(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_div(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_div_ss(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div_lowp(glm_f32vec4 a, glm_f32vec4 b) +{ + return glm_vec4_mul(a, _mm_rcp_ps(b)); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_swizzle_xyzw(glm_f32vec4 a) +{ +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + return 
_mm_permute_ps(a, _MM_SHUFFLE(3, 2, 1, 0)); +# else + return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 1, 0)); +# endif +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c) +{ +# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG) + return _mm_fmadd_ss(a, b, c); +# else + return _mm_add_ss(_mm_mul_ss(a, b), c); +# endif +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c) +{ +# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG) + return _mm_fmadd_ps(a, b, c); +# else + return glm_vec4_add(glm_vec4_mul(a, b), c); +# endif +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_abs(glm_f32vec4 x) +{ + return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF))); +} + +GLM_FUNC_QUALIFIER glm_ivec4 glm_ivec4_abs(glm_ivec4 x) +{ +# if GLM_ARCH & GLM_ARCH_SSSE3_BIT + return _mm_sign_epi32(x, x); +# else + glm_ivec4 const sgn0 = _mm_srai_epi32(x, 31); + glm_ivec4 const inv0 = _mm_xor_si128(x, sgn0); + glm_ivec4 const sub0 = _mm_sub_epi32(inv0, sgn0); + return sub0; +# endif +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sign(glm_vec4 x) +{ + glm_vec4 const zro0 = _mm_setzero_ps(); + glm_vec4 const cmp0 = _mm_cmplt_ps(x, zro0); + glm_vec4 const cmp1 = _mm_cmpgt_ps(x, zro0); + glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(-1.0f)); + glm_vec4 const and1 = _mm_and_ps(cmp1, _mm_set1_ps(1.0f)); + glm_vec4 const or0 = _mm_or_ps(and0, and1); + return or0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_round(glm_vec4 x) +{ +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT); +# else + glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000))); + glm_vec4 const and0 = _mm_and_ps(sgn0, x); + glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f)); + glm_vec4 const add0 = glm_vec4_add(x, or0); + glm_vec4 const sub0 = glm_vec4_sub(add0, or0); + return sub0; +# endif +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_floor(glm_vec4 x) +{ +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + return _mm_floor_ps(x); +# else + glm_vec4 const rnd0 = glm_vec4_round(x); + glm_vec4 const cmp0 = _mm_cmplt_ps(x, rnd0); + glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f)); + glm_vec4 const sub0 = glm_vec4_sub(rnd0, and0); + return sub0; +# endif +} + +/* trunc TODO +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_trunc(glm_vec4 x) +{ + return glm_vec4(); +} +*/ + +//roundEven +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_roundEven(glm_vec4 x) +{ + glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000))); + glm_vec4 const and0 = _mm_and_ps(sgn0, x); + glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f)); + glm_vec4 const add0 = glm_vec4_add(x, or0); + glm_vec4 const sub0 = glm_vec4_sub(add0, or0); + return sub0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_ceil(glm_vec4 x) +{ +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + return _mm_ceil_ps(x); +# else + glm_vec4 const rnd0 = glm_vec4_round(x); + glm_vec4 const cmp0 = _mm_cmpgt_ps(x, rnd0); + glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f)); + glm_vec4 const add0 = glm_vec4_add(rnd0, and0); + return add0; +# endif +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fract(glm_vec4 x) +{ + glm_vec4 const flr0 = glm_vec4_floor(x); + glm_vec4 const sub0 = glm_vec4_sub(x, flr0); + return sub0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mod(glm_vec4 x, glm_vec4 y) +{ + glm_vec4 const div0 = glm_vec4_div(x, y); + glm_vec4 const flr0 = glm_vec4_floor(div0); + glm_vec4 const mul0 = glm_vec4_mul(y, flr0); + glm_vec4 const sub0 = 
glm_vec4_sub(x, mul0); + return sub0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_clamp(glm_vec4 v, glm_vec4 minVal, glm_vec4 maxVal) +{ + glm_vec4 const min0 = _mm_min_ps(v, maxVal); + glm_vec4 const max0 = _mm_max_ps(min0, minVal); + return max0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mix(glm_vec4 v1, glm_vec4 v2, glm_vec4 a) +{ + glm_vec4 const sub0 = glm_vec4_sub(_mm_set1_ps(1.0f), a); + glm_vec4 const mul0 = glm_vec4_mul(v1, sub0); + glm_vec4 const mad0 = glm_vec4_fma(v2, a, mul0); + return mad0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_step(glm_vec4 edge, glm_vec4 x) +{ + glm_vec4 const cmp = _mm_cmple_ps(x, edge); + return _mm_movemask_ps(cmp) == 0 ? _mm_set1_ps(1.0f) : _mm_setzero_ps(); +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_smoothstep(glm_vec4 edge0, glm_vec4 edge1, glm_vec4 x) +{ + glm_vec4 const sub0 = glm_vec4_sub(x, edge0); + glm_vec4 const sub1 = glm_vec4_sub(edge1, edge0); + glm_vec4 const div0 = glm_vec4_sub(sub0, sub1); + glm_vec4 const clp0 = glm_vec4_clamp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f)); + glm_vec4 const mul0 = glm_vec4_mul(_mm_set1_ps(2.0f), clp0); + glm_vec4 const sub2 = glm_vec4_sub(_mm_set1_ps(3.0f), mul0); + glm_vec4 const mul1 = glm_vec4_mul(clp0, clp0); + glm_vec4 const mul2 = glm_vec4_mul(mul1, sub2); + return mul2; +} + +// Agner Fog method +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_nan(glm_vec4 x) +{ + glm_ivec4 const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer + glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit + glm_ivec4 const t3 = _mm_set1_epi32(int(0xFF000000)); // exponent mask + glm_ivec4 const t4 = _mm_and_si128(t2, t3); // exponent + glm_ivec4 const t5 = _mm_andnot_si128(t3, t2); // fraction + glm_ivec4 const Equal = _mm_cmpeq_epi32(t3, t4); + glm_ivec4 const Nequal = _mm_cmpeq_epi32(t5, _mm_setzero_si128()); + glm_ivec4 const And = _mm_and_si128(Equal, Nequal); + return _mm_castsi128_ps(And); // exponent = all 1s and fraction != 0 +} + +// Agner Fog method +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_inf(glm_vec4 x) +{ + glm_ivec4 const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer + glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit + return _mm_castsi128_ps(_mm_cmpeq_epi32(t2, _mm_set1_epi32(int(0xFF000000)))); // exponent is all 1s, fraction is 0 +} + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/simd/exponential.h b/thirdparty/manifold/thirdparty/glm/glm/simd/exponential.h new file mode 100644 index 000000000000..bc351d0119b9 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/simd/exponential.h @@ -0,0 +1,20 @@ +/// @ref simd +/// @file glm/simd/experimental.h + +#pragma once + +#include "platform.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sqrt_lowp(glm_f32vec4 x) +{ + return _mm_mul_ss(_mm_rsqrt_ss(x), x); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sqrt_lowp(glm_f32vec4 x) +{ + return _mm_mul_ps(_mm_rsqrt_ps(x), x); +} + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/simd/geometric.h b/thirdparty/manifold/thirdparty/glm/glm/simd/geometric.h new file mode 100644 index 000000000000..07d7cbcc425f --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/simd/geometric.h @@ -0,0 +1,124 @@ +/// @ref simd +/// @file glm/simd/geometric.h + +#pragma once + +#include "common.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +GLM_FUNC_DECL glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2); +GLM_FUNC_DECL glm_vec4 
glm_vec1_dot(glm_vec4 v1, glm_vec4 v2); + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_length(glm_vec4 x) +{ + glm_vec4 const dot0 = glm_vec4_dot(x, x); + glm_vec4 const sqt0 = _mm_sqrt_ps(dot0); + return sqt0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_distance(glm_vec4 p0, glm_vec4 p1) +{ + glm_vec4 const sub0 = _mm_sub_ps(p0, p1); + glm_vec4 const len0 = glm_vec4_length(sub0); + return len0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2) +{ +# if GLM_ARCH & GLM_ARCH_AVX_BIT + return _mm_dp_ps(v1, v2, 0xff); +# elif GLM_ARCH & GLM_ARCH_SSE3_BIT + glm_vec4 const mul0 = _mm_mul_ps(v1, v2); + glm_vec4 const hadd0 = _mm_hadd_ps(mul0, mul0); + glm_vec4 const hadd1 = _mm_hadd_ps(hadd0, hadd0); + return hadd1; +# else + glm_vec4 const mul0 = _mm_mul_ps(v1, v2); + glm_vec4 const swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1)); + glm_vec4 const add0 = _mm_add_ps(mul0, swp0); + glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3)); + glm_vec4 const add1 = _mm_add_ps(add0, swp1); + return add1; +# endif +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2) +{ +# if GLM_ARCH & GLM_ARCH_AVX_BIT + return _mm_dp_ps(v1, v2, 0xff); +# elif GLM_ARCH & GLM_ARCH_SSE3_BIT + glm_vec4 const mul0 = _mm_mul_ps(v1, v2); + glm_vec4 const had0 = _mm_hadd_ps(mul0, mul0); + glm_vec4 const had1 = _mm_hadd_ps(had0, had0); + return had1; +# else + glm_vec4 const mul0 = _mm_mul_ps(v1, v2); + glm_vec4 const mov0 = _mm_movehl_ps(mul0, mul0); + glm_vec4 const add0 = _mm_add_ps(mov0, mul0); + glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, 1); + glm_vec4 const add1 = _mm_add_ss(add0, swp1); + return add1; +# endif +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_cross(glm_vec4 v1, glm_vec4 v2) +{ + glm_vec4 const swp0 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 0, 2, 1)); + glm_vec4 const swp1 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 1, 0, 2)); + glm_vec4 const swp2 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 0, 2, 1)); + glm_vec4 const swp3 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 1, 0, 2)); + glm_vec4 const mul0 = _mm_mul_ps(swp0, swp3); + glm_vec4 const mul1 = _mm_mul_ps(swp1, swp2); + glm_vec4 const sub0 = _mm_sub_ps(mul0, mul1); + return sub0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_normalize(glm_vec4 v) +{ + glm_vec4 const dot0 = glm_vec4_dot(v, v); + glm_vec4 const isr0 = _mm_rsqrt_ps(dot0); + glm_vec4 const mul0 = _mm_mul_ps(v, isr0); + return mul0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_faceforward(glm_vec4 N, glm_vec4 I, glm_vec4 Nref) +{ + glm_vec4 const dot0 = glm_vec4_dot(Nref, I); + glm_vec4 const sgn0 = glm_vec4_sign(dot0); + glm_vec4 const mul0 = _mm_mul_ps(sgn0, _mm_set1_ps(-1.0f)); + glm_vec4 const mul1 = _mm_mul_ps(N, mul0); + return mul1; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_reflect(glm_vec4 I, glm_vec4 N) +{ + glm_vec4 const dot0 = glm_vec4_dot(N, I); + glm_vec4 const mul0 = _mm_mul_ps(N, dot0); + glm_vec4 const mul1 = _mm_mul_ps(mul0, _mm_set1_ps(2.0f)); + glm_vec4 const sub0 = _mm_sub_ps(I, mul1); + return sub0; +} + +GLM_FUNC_QUALIFIER __m128 glm_vec4_refract(glm_vec4 I, glm_vec4 N, glm_vec4 eta) +{ + glm_vec4 const dot0 = glm_vec4_dot(N, I); + glm_vec4 const mul0 = _mm_mul_ps(eta, eta); + glm_vec4 const mul1 = _mm_mul_ps(dot0, dot0); + glm_vec4 const sub0 = _mm_sub_ps(_mm_set1_ps(1.0f), mul0); + glm_vec4 const sub1 = _mm_sub_ps(_mm_set1_ps(1.0f), mul1); + glm_vec4 const mul2 = _mm_mul_ps(sub0, sub1); + + if(_mm_movemask_ps(_mm_cmplt_ss(mul2, _mm_set1_ps(0.0f))) == 0) + return _mm_set1_ps(0.0f); + + glm_vec4 const sqt0 = 
_mm_sqrt_ps(mul2); + glm_vec4 const mad0 = glm_vec4_fma(eta, dot0, sqt0); + glm_vec4 const mul4 = _mm_mul_ps(mad0, N); + glm_vec4 const mul5 = _mm_mul_ps(eta, I); + glm_vec4 const sub2 = _mm_sub_ps(mul5, mul4); + + return sub2; +} + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/simd/integer.h b/thirdparty/manifold/thirdparty/glm/glm/simd/integer.h new file mode 100644 index 000000000000..93814183fe02 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/simd/integer.h @@ -0,0 +1,115 @@ +/// @ref simd +/// @file glm/simd/integer.h + +#pragma once + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave(glm_uvec4 x) +{ + glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF); + glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF); + glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F); + glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333); + glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555); + + glm_uvec4 Reg1; + glm_uvec4 Reg2; + + // REG1 = x; + // REG2 = y; + //Reg1 = _mm_unpacklo_epi64(x, y); + Reg1 = x; + + //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF); + //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF); + Reg2 = _mm_slli_si128(Reg1, 2); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask4); + + //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF); + //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF); + Reg2 = _mm_slli_si128(Reg1, 1); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask3); + + //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F); + //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F); + Reg2 = _mm_slli_epi32(Reg1, 4); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask2); + + //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333); + //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333); + Reg2 = _mm_slli_epi32(Reg1, 2); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask1); + + //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555); + //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555); + Reg2 = _mm_slli_epi32(Reg1, 1); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask0); + + //return REG1 | (REG2 << 1); + Reg2 = _mm_slli_epi32(Reg1, 1); + Reg2 = _mm_srli_si128(Reg2, 8); + Reg1 = _mm_or_si128(Reg1, Reg2); + + return Reg1; +} + +GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave2(glm_uvec4 x, glm_uvec4 y) +{ + glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF); + glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF); + glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F); + glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333); + glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555); + + glm_uvec4 Reg1; + glm_uvec4 Reg2; + + // REG1 = x; + // REG2 = y; + Reg1 = _mm_unpacklo_epi64(x, y); + + //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF); + //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF); + Reg2 = _mm_slli_si128(Reg1, 2); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask4); + + //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF); + //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF); + Reg2 = _mm_slli_si128(Reg1, 1); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask3); + + //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F); + //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F); + Reg2 = 
_mm_slli_epi32(Reg1, 4); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask2); + + //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333); + //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333); + Reg2 = _mm_slli_epi32(Reg1, 2); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask1); + + //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555); + //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555); + Reg2 = _mm_slli_epi32(Reg1, 1); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask0); + + //return REG1 | (REG2 << 1); + Reg2 = _mm_slli_epi32(Reg1, 1); + Reg2 = _mm_srli_si128(Reg2, 8); + Reg1 = _mm_or_si128(Reg1, Reg2); + + return Reg1; +} + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/simd/matrix.h b/thirdparty/manifold/thirdparty/glm/glm/simd/matrix.h new file mode 100644 index 000000000000..b6c42ea4c17c --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/simd/matrix.h @@ -0,0 +1,1028 @@ +/// @ref simd +/// @file glm/simd/matrix.h + +#pragma once + +#include "geometric.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +GLM_FUNC_QUALIFIER void glm_mat4_matrixCompMult(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) +{ + out[0] = _mm_mul_ps(in1[0], in2[0]); + out[1] = _mm_mul_ps(in1[1], in2[1]); + out[2] = _mm_mul_ps(in1[2], in2[2]); + out[3] = _mm_mul_ps(in1[3], in2[3]); +} + +GLM_FUNC_QUALIFIER void glm_mat4_add(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) +{ + out[0] = _mm_add_ps(in1[0], in2[0]); + out[1] = _mm_add_ps(in1[1], in2[1]); + out[2] = _mm_add_ps(in1[2], in2[2]); + out[3] = _mm_add_ps(in1[3], in2[3]); +} + +GLM_FUNC_QUALIFIER void glm_mat4_sub(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) +{ + out[0] = _mm_sub_ps(in1[0], in2[0]); + out[1] = _mm_sub_ps(in1[1], in2[1]); + out[2] = _mm_sub_ps(in1[2], in2[2]); + out[3] = _mm_sub_ps(in1[3], in2[3]); +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_mul_vec4(glm_vec4 const m[4], glm_vec4 v) +{ + __m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1)); + __m128 v2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 v3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(m[0], v0); + __m128 m1 = _mm_mul_ps(m[1], v1); + __m128 m2 = _mm_mul_ps(m[2], v2); + __m128 m3 = _mm_mul_ps(m[3], v3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + return a2; +} + +GLM_FUNC_QUALIFIER __m128 glm_vec4_mul_mat4(glm_vec4 v, glm_vec4 const m[4]) +{ + __m128 i0 = m[0]; + __m128 i1 = m[1]; + __m128 i2 = m[2]; + __m128 i3 = m[3]; + + __m128 m0 = _mm_mul_ps(v, i0); + __m128 m1 = _mm_mul_ps(v, i1); + __m128 m2 = _mm_mul_ps(v, i2); + __m128 m3 = _mm_mul_ps(v, i3); + + __m128 u0 = _mm_unpacklo_ps(m0, m1); + __m128 u1 = _mm_unpackhi_ps(m0, m1); + __m128 a0 = _mm_add_ps(u0, u1); + + __m128 u2 = _mm_unpacklo_ps(m2, m3); + __m128 u3 = _mm_unpackhi_ps(m2, m3); + __m128 a1 = _mm_add_ps(u2, u3); + + __m128 f0 = _mm_movelh_ps(a0, a1); + __m128 f1 = _mm_movehl_ps(a1, a0); + __m128 f2 = _mm_add_ps(f0, f1); + + return f2; +} + +GLM_FUNC_QUALIFIER void glm_mat4_mul(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) +{ + { + __m128 e0 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 
e3 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[0] = a2; + } + + { + __m128 e0 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[1] = a2; + } + + { + __m128 e0 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[2] = a2; + } + + { + //(__m128&)_mm_shuffle_epi32(__m128i&)in2[0], _MM_SHUFFLE(3, 3, 3, 3)) + __m128 e0 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[3] = a2; + } +} + +GLM_FUNC_QUALIFIER void glm_mat4_transpose(glm_vec4 const in[4], glm_vec4 out[4]) +{ + __m128 tmp0 = _mm_shuffle_ps(in[0], in[1], 0x44); + __m128 tmp2 = _mm_shuffle_ps(in[0], in[1], 0xEE); + __m128 tmp1 = _mm_shuffle_ps(in[2], in[3], 0x44); + __m128 tmp3 = _mm_shuffle_ps(in[2], in[3], 0xEE); + + out[0] = _mm_shuffle_ps(tmp0, tmp1, 0x88); + out[1] = _mm_shuffle_ps(tmp0, tmp1, 0xDD); + out[2] = _mm_shuffle_ps(tmp2, tmp3, 0x88); + out[3] = _mm_shuffle_ps(tmp2, tmp3, 0xDD); +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_highp(glm_vec4 const in[4]) +{ + __m128 Fac0; + { + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac0 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac1; + { 
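+ // As with Fac0 above, each Fac* block evaluates four of the 2x2 sub-determinants (the SubFactor terms below) in a single register; the cofactor expansion further down reuses them across all four output columns.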
+ // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac1 = _mm_sub_ps(Mul00, Mul01); + } + + + __m128 Fac2; + { + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac2 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac3; + { + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac3 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac4; + { + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac4 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac5; + { + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor12 = 
m[1][0] * m[3][1] - m[3][0] * m[1][1]; + // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac5 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); + __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); + + // m[1][0] + // m[0][0] + // m[0][0] + // m[0][0] + __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][1] + // m[0][1] + // m[0][1] + // m[0][1] + __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][2] + // m[0][2] + // m[0][2] + // m[0][2] + __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][3] + // m[0][3] + // m[0][3] + // m[0][3] + __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); + + // col0 + // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), + // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), + // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), + // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), + __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); + __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); + __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); + __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); + __m128 Add00 = _mm_add_ps(Sub00, Mul02); + __m128 Inv0 = _mm_mul_ps(SignB, Add00); + + // col1 + // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), + // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), + // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), + // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), + __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); + __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); + __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); + __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); + __m128 Add01 = _mm_add_ps(Sub01, Mul05); + __m128 Inv1 = _mm_mul_ps(SignA, Add01); + + // col2 + // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), + // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), + // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), + // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), + __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); + __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); + __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); + __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); + __m128 Add02 = _mm_add_ps(Sub02, Mul08); + __m128 Inv2 = _mm_mul_ps(SignB, Add02); + + // col3 + // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), + // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), + // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), + // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); + __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); + __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); + __m128 Mul11 = 
_mm_mul_ps(Vec2, Fac5); + __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); + __m128 Add03 = _mm_add_ps(Sub03, Mul11); + __m128 Inv3 = _mm_mul_ps(SignA, Add03); + + __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); + + // valType Determinant = m[0][0] * Inverse[0][0] + // + m[0][1] * Inverse[1][0] + // + m[0][2] * Inverse[2][0] + // + m[0][3] * Inverse[3][0]; + __m128 Det0 = glm_vec4_dot(in[0], Row2); + return Det0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_lowp(glm_vec4 const m[4]) +{ + // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128( + + //T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + //T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + //T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + + // First 2 columns + __m128 Swp2A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 1, 1, 2))); + __m128 Swp3A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(3, 2, 3, 3))); + __m128 MulA = _mm_mul_ps(Swp2A, Swp3A); + + // Second 2 columns + __m128 Swp2B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(3, 2, 3, 3))); + __m128 Swp3B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(0, 1, 1, 2))); + __m128 MulB = _mm_mul_ps(Swp2B, Swp3B); + + // Columns subtraction + __m128 SubE = _mm_sub_ps(MulA, MulB); + + // Last 2 rows + __m128 Swp2C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 0, 1, 2))); + __m128 Swp3C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(1, 2, 0, 0))); + __m128 MulC = _mm_mul_ps(Swp2C, Swp3C); + __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC); + + //vec<4, T, Q> DetCof( + // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), + // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), + // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), + // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); + + __m128 SubFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubE), _MM_SHUFFLE(2, 1, 0, 0))); + __m128 SwpFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(0, 0, 0, 1))); + __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA); + + __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1)); + __m128 SubFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpB), _MM_SHUFFLE(3, 1, 1, 0)));//SubF[0], SubE[3], SubE[3], SubE[1]; + __m128 SwpFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(1, 1, 2, 2))); + __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB); + + __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB); + + __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2)); + __m128 SubFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpC), _MM_SHUFFLE(3, 3, 2, 0))); + __m128 SwpFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(2, 3, 3, 3))); + __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC); + + __m128 AddRes = _mm_add_ps(SubRes, MulFacC); + __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f)); + + //return m[0][0] * DetCof[0] + // + m[0][1] * DetCof[1] 
+ // + m[0][2] * DetCof[2] + // + m[0][3] * DetCof[3]; + + return glm_vec4_dot(m[0], DetCof); +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant(glm_vec4 const m[4]) +{ + // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(add) + + //T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + //T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + //T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + + // First 2 columns + __m128 Swp2A = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 1, 1, 2)); + __m128 Swp3A = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(3, 2, 3, 3)); + __m128 MulA = _mm_mul_ps(Swp2A, Swp3A); + + // Second 2 columns + __m128 Swp2B = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(3, 2, 3, 3)); + __m128 Swp3B = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(0, 1, 1, 2)); + __m128 MulB = _mm_mul_ps(Swp2B, Swp3B); + + // Columns subtraction + __m128 SubE = _mm_sub_ps(MulA, MulB); + + // Last 2 rows + __m128 Swp2C = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 0, 1, 2)); + __m128 Swp3C = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(1, 2, 0, 0)); + __m128 MulC = _mm_mul_ps(Swp2C, Swp3C); + __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC); + + //vec<4, T, Q> DetCof( + // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), + // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), + // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), + // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); + + __m128 SubFacA = _mm_shuffle_ps(SubE, SubE, _MM_SHUFFLE(2, 1, 0, 0)); + __m128 SwpFacA = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(0, 0, 0, 1)); + __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA); + + __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1)); + __m128 SubFacB = _mm_shuffle_ps(SubTmpB, SubTmpB, _MM_SHUFFLE(3, 1, 1, 0));//SubF[0], SubE[3], SubE[3], SubE[1]; + __m128 SwpFacB = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2)); + __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB); + + __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB); + + __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2)); + __m128 SubFacC = _mm_shuffle_ps(SubTmpC, SubTmpC, _MM_SHUFFLE(3, 3, 2, 0)); + __m128 SwpFacC = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(2, 3, 3, 3)); + __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC); + + __m128 AddRes = _mm_add_ps(SubRes, MulFacC); + __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f)); + + //return m[0][0] * DetCof[0] + // + m[0][1] * DetCof[1] + // + m[0][2] * DetCof[2] + // + m[0][3] * DetCof[3]; + + return glm_vec4_dot(m[0], DetCof); +} + +GLM_FUNC_QUALIFIER void glm_mat4_inverse(glm_vec4 const in[4], glm_vec4 out[4]) +{ + __m128 Fac0; + { + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = 
_mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac0 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac1; + { + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac1 = _mm_sub_ps(Mul00, Mul01); + } + + + __m128 Fac2; + { + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac2 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac3; + { + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac3 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac4; + { + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac4 = 
_mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac5; + { + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac5 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); + __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); + + // m[1][0] + // m[0][0] + // m[0][0] + // m[0][0] + __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][1] + // m[0][1] + // m[0][1] + // m[0][1] + __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][2] + // m[0][2] + // m[0][2] + // m[0][2] + __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][3] + // m[0][3] + // m[0][3] + // m[0][3] + __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); + + // col0 + // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), + // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), + // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), + // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), + __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); + __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); + __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); + __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); + __m128 Add00 = _mm_add_ps(Sub00, Mul02); + __m128 Inv0 = _mm_mul_ps(SignB, Add00); + + // col1 + // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), + // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), + // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), + // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), + __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); + __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); + __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); + __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); + __m128 Add01 = _mm_add_ps(Sub01, Mul05); + __m128 Inv1 = _mm_mul_ps(SignA, Add01); + + // col2 + // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), + // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), + // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), + // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), + __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); + __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); + __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); + __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); + __m128 Add02 = _mm_add_ps(Sub02, Mul08); + __m128 Inv2 = _mm_mul_ps(SignB, Add02); + + // col3 + // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), + // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), + // - (Vec1[0] * Fac2[2] 
- Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), + // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); + __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); + __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); + __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); + __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); + __m128 Add03 = _mm_add_ps(Sub03, Mul11); + __m128 Inv3 = _mm_mul_ps(SignA, Add03); + + __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); + + // valType Determinant = m[0][0] * Inverse[0][0] + // + m[0][1] * Inverse[1][0] + // + m[0][2] * Inverse[2][0] + // + m[0][3] * Inverse[3][0]; + __m128 Det0 = glm_vec4_dot(in[0], Row2); + __m128 Rcp0 = _mm_div_ps(_mm_set1_ps(1.0f), Det0); + //__m128 Rcp0 = _mm_rcp_ps(Det0); + + // Inverse /= Determinant; + out[0] = _mm_mul_ps(Inv0, Rcp0); + out[1] = _mm_mul_ps(Inv1, Rcp0); + out[2] = _mm_mul_ps(Inv2, Rcp0); + out[3] = _mm_mul_ps(Inv3, Rcp0); +} + +GLM_FUNC_QUALIFIER void glm_mat4_inverse_lowp(glm_vec4 const in[4], glm_vec4 out[4]) +{ + __m128 Fac0; + { + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac0 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac1; + { + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac1 = _mm_sub_ps(Mul00, Mul01); + } + + + __m128 Fac2; + { + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = 
_mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac2 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac3; + { + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac3 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac4; + { + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac4 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac5; + { + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac5 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); + __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); + + // m[1][0] + // m[0][0] + // m[0][0] + // m[0][0] + __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][1] + // m[0][1] + // m[0][1] + // m[0][1] + __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][2] + // m[0][2] + // m[0][2] + // m[0][2] + __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][3] + // m[0][3] + // m[0][3] + // m[0][3] + __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); + + 
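// Cramer's rule: each col* block below combines the broadcast matrix elements (Vec0..Vec3) with the shared sub-determinants (Fac0..Fac5), sign-alternated by SignA/SignB, to form one column of the adjugate; scaling by the reciprocal determinant at the end yields the inverse. + 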
// col0 + // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), + // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), + // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), + // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), + __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); + __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); + __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); + __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); + __m128 Add00 = _mm_add_ps(Sub00, Mul02); + __m128 Inv0 = _mm_mul_ps(SignB, Add00); + + // col1 + // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), + // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), + // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), + // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), + __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); + __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); + __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); + __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); + __m128 Add01 = _mm_add_ps(Sub01, Mul05); + __m128 Inv1 = _mm_mul_ps(SignA, Add01); + + // col2 + // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), + // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), + // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), + // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), + __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); + __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); + __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); + __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); + __m128 Add02 = _mm_add_ps(Sub02, Mul08); + __m128 Inv2 = _mm_mul_ps(SignB, Add02); + + // col3 + // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), + // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), + // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), + // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); + __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); + __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); + __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); + __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); + __m128 Add03 = _mm_add_ps(Sub03, Mul11); + __m128 Inv3 = _mm_mul_ps(SignA, Add03); + + __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); + + // valType Determinant = m[0][0] * Inverse[0][0] + // + m[0][1] * Inverse[1][0] + // + m[0][2] * Inverse[2][0] + // + m[0][3] * Inverse[3][0]; + __m128 Det0 = glm_vec4_dot(in[0], Row2); + __m128 Rcp0 = _mm_rcp_ps(Det0); + //__m128 Rcp0 = _mm_div_ps(one, Det0); + // Inverse /= Determinant; + out[0] = _mm_mul_ps(Inv0, Rcp0); + out[1] = _mm_mul_ps(Inv1, Rcp0); + out[2] = _mm_mul_ps(Inv2, Rcp0); + out[3] = _mm_mul_ps(Inv3, Rcp0); +} +/* +GLM_FUNC_QUALIFIER void glm_mat4_rotate(__m128 const in[4], float Angle, float const v[3], __m128 out[4]) +{ + float a = glm::radians(Angle); + float c = cos(a); + float s = sin(a); + + glm::vec4 AxisA(v[0], v[1], v[2], float(0)); + __m128 AxisB = _mm_set_ps(AxisA.w, AxisA.z, AxisA.y, AxisA.x); + __m128 AxisC = detail::sse_nrm_ps(AxisB); + + __m128 Cos0 = _mm_set_ss(c); + __m128 CosA = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Sin0 = _mm_set_ss(s); + __m128 SinA = _mm_shuffle_ps(Sin0, Sin0, _MM_SHUFFLE(0, 0, 0, 0)); + + // vec<3, T, Q> temp = (valType(1) - c) * axis; + __m128 Temp0 = _mm_sub_ps(one, CosA); + __m128 Temp1 = _mm_mul_ps(Temp0, AxisC); + + //Rotate[0][0] = c + temp[0] * axis[0]; + //Rotate[0][1] = 0 + temp[0] * axis[1] + s 
* axis[2]; + //Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1]; + __m128 Axis0 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 TmpA0 = _mm_mul_ps(Axis0, AxisC); + __m128 CosA0 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 1, 0)); + __m128 TmpA1 = _mm_add_ps(CosA0, TmpA0); + __m128 SinA0 = SinA;//_mm_set_ps(0.0f, s, -s, 0.0f); + __m128 TmpA2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 1, 2, 3)); + __m128 TmpA3 = _mm_mul_ps(SinA0, TmpA2); + __m128 TmpA4 = _mm_add_ps(TmpA1, TmpA3); + + //Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2]; + //Rotate[1][1] = c + temp[1] * axis[1]; + //Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0]; + __m128 Axis1 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(1, 1, 1, 1)); + __m128 TmpB0 = _mm_mul_ps(Axis1, AxisC); + __m128 CosA1 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 0, 1)); + __m128 TmpB1 = _mm_add_ps(CosA1, TmpB0); + __m128 SinB0 = SinA;//_mm_set_ps(-s, 0.0f, s, 0.0f); + __m128 TmpB2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 0, 3, 2)); + __m128 TmpB3 = _mm_mul_ps(SinA0, TmpB2); + __m128 TmpB4 = _mm_add_ps(TmpB1, TmpB3); + + //Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1]; + //Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0]; + //Rotate[2][2] = c + temp[2] * axis[2]; + __m128 Axis2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 TmpC0 = _mm_mul_ps(Axis2, AxisC); + __m128 CosA2 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 0, 1, 1)); + __m128 TmpC1 = _mm_add_ps(CosA2, TmpC0); + __m128 SinC0 = SinA;//_mm_set_ps(s, -s, 0.0f, 0.0f); + __m128 TmpC2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 3, 0, 1)); + __m128 TmpC3 = _mm_mul_ps(SinA0, TmpC2); + __m128 TmpC4 = _mm_add_ps(TmpC1, TmpC3); + + __m128 Result[4]; + Result[0] = TmpA4; + Result[1] = TmpB4; + Result[2] = TmpC4; + Result[3] = _mm_set_ps(1, 0, 0, 0); + + //mat<4, 4, valType> Result; + //Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; + //Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; + //Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; + //Result[3] = m[3]; + //return Result; + sse_mul_ps(in, Result, out); +} +*/ +GLM_FUNC_QUALIFIER void glm_mat4_outerProduct(__m128 const& c, __m128 const& r, __m128 out[4]) +{ + out[0] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0))); + out[1] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1))); + out[2] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(2, 2, 2, 2))); + out[3] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(3, 3, 3, 3))); +} + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/simd/neon.h b/thirdparty/manifold/thirdparty/glm/glm/simd/neon.h new file mode 100644 index 000000000000..f85947f5c19a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/simd/neon.h @@ -0,0 +1,155 @@ +/// @ref simd_neon +/// @file glm/simd/neon.h + +#pragma once + +#if GLM_ARCH & GLM_ARCH_NEON_BIT +#include <arm_neon.h> + +namespace glm { + namespace neon { + static inline float32x4_t dupq_lane(float32x4_t vsrc, int lane) { + switch(lane) { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + case 0: return vdupq_laneq_f32(vsrc, 0); + case 1: return vdupq_laneq_f32(vsrc, 1); + case 2: return vdupq_laneq_f32(vsrc, 2); + case 3: return vdupq_laneq_f32(vsrc, 3); +#else + case 0: return vdupq_n_f32(vgetq_lane_f32(vsrc, 0)); + case 1: return vdupq_n_f32(vgetq_lane_f32(vsrc, 1)); + case 2: return vdupq_n_f32(vgetq_lane_f32(vsrc, 2)); + case 3: return vdupq_n_f32(vgetq_lane_f32(vsrc, 3)); +#endif + } + 
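// lane must be in [0, 3]; every valid value returns from the switch above, so falling through is a caller error. + 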
assert(!"Unreachable code executed!"); + return vdupq_n_f32(0.0f); + } + + static inline float32x2_t dup_lane(float32x4_t vsrc, int lane) { + switch(lane) { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + case 0: return vdup_laneq_f32(vsrc, 0); + case 1: return vdup_laneq_f32(vsrc, 1); + case 2: return vdup_laneq_f32(vsrc, 2); + case 3: return vdup_laneq_f32(vsrc, 3); +#else + case 0: return vdup_n_f32(vgetq_lane_f32(vsrc, 0)); + case 1: return vdup_n_f32(vgetq_lane_f32(vsrc, 1)); + case 2: return vdup_n_f32(vgetq_lane_f32(vsrc, 2)); + case 3: return vdup_n_f32(vgetq_lane_f32(vsrc, 3)); +#endif + } + assert(!"Unreachable code executed!"); + return vdup_n_f32(0.0f); + } + + static inline float32x4_t copy_lane(float32x4_t vdst, int dlane, float32x4_t vsrc, int slane) { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + switch(dlane) { + case 0: + switch(slane) { + case 0: return vcopyq_laneq_f32(vdst, 0, vsrc, 0); + case 1: return vcopyq_laneq_f32(vdst, 0, vsrc, 1); + case 2: return vcopyq_laneq_f32(vdst, 0, vsrc, 2); + case 3: return vcopyq_laneq_f32(vdst, 0, vsrc, 3); + } + assert(!"Unreachable code executed!"); + case 1: + switch(slane) { + case 0: return vcopyq_laneq_f32(vdst, 1, vsrc, 0); + case 1: return vcopyq_laneq_f32(vdst, 1, vsrc, 1); + case 2: return vcopyq_laneq_f32(vdst, 1, vsrc, 2); + case 3: return vcopyq_laneq_f32(vdst, 1, vsrc, 3); + } + assert(!"Unreachable code executed!"); + case 2: + switch(slane) { + case 0: return vcopyq_laneq_f32(vdst, 2, vsrc, 0); + case 1: return vcopyq_laneq_f32(vdst, 2, vsrc, 1); + case 2: return vcopyq_laneq_f32(vdst, 2, vsrc, 2); + case 3: return vcopyq_laneq_f32(vdst, 2, vsrc, 3); + } + assert(!"Unreachable code executed!"); + case 3: + switch(slane) { + case 0: return vcopyq_laneq_f32(vdst, 3, vsrc, 0); + case 1: return vcopyq_laneq_f32(vdst, 3, vsrc, 1); + case 2: return vcopyq_laneq_f32(vdst, 3, vsrc, 2); + case 3: return vcopyq_laneq_f32(vdst, 3, vsrc, 3); + } + assert(!"Unreachable code executed!"); + } +#else + + float l; + switch(slane) { + case 0: l = vgetq_lane_f32(vsrc, 0); break; + case 1: l = vgetq_lane_f32(vsrc, 1); break; + case 2: l = vgetq_lane_f32(vsrc, 2); break; + case 3: l = vgetq_lane_f32(vsrc, 3); break; + default: + assert(!"Unreachable code executed!"); + } + switch(dlane) { + case 0: return vsetq_lane_f32(l, vdst, 0); + case 1: return vsetq_lane_f32(l, vdst, 1); + case 2: return vsetq_lane_f32(l, vdst, 2); + case 3: return vsetq_lane_f32(l, vdst, 3); + } +#endif + assert(!"Unreachable code executed!"); + return vdupq_n_f32(0.0f); + } + + static inline float32x4_t mul_lane(float32x4_t v, float32x4_t vlane, int lane) { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + switch(lane) { + case 0: return vmulq_laneq_f32(v, vlane, 0); break; + case 1: return vmulq_laneq_f32(v, vlane, 1); break; + case 2: return vmulq_laneq_f32(v, vlane, 2); break; + case 3: return vmulq_laneq_f32(v, vlane, 3); break; + default: + assert(!"Unreachable code executed!"); + } + assert(!"Unreachable code executed!"); + return vdupq_n_f32(0.0f); +#else + return vmulq_f32(v, dupq_lane(vlane, lane)); +#endif + } + + static inline float32x4_t madd_lane(float32x4_t acc, float32x4_t v, float32x4_t vlane, int lane) { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT +#ifdef GLM_CONFIG_FORCE_FMA +# define FMADD_LANE(acc, x, y, L) do { asm volatile ("fmla %0.4s, %1.4s, %2.4s" : "+w"(acc) : "w"(x), "w"(dup_lane(y, L))); } while(0) +#else +# define FMADD_LANE(acc, x, y, L) do { acc = vmlaq_laneq_f32(acc, x, y, L); } while(0) +#endif + + switch(lane) { + case 0: + FMADD_LANE(acc, v, vlane, 0); + return acc; + 
case 1: + FMADD_LANE(acc, v, vlane, 1); + return acc; + case 2: + FMADD_LANE(acc, v, vlane, 2); + return acc; + case 3: + FMADD_LANE(acc, v, vlane, 3); + return acc; + default: + assert(!"Unreachable code executed!"); + } + assert(!"Unreachable code executed!"); + return vdupq_n_f32(0.0f); +# undef FMADD_LANE +#else + return vaddq_f32(acc, vmulq_f32(v, dupq_lane(vlane, lane))); +#endif + } + } //namespace neon +} // namespace glm +#endif // GLM_ARCH & GLM_ARCH_NEON_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/simd/packing.h b/thirdparty/manifold/thirdparty/glm/glm/simd/packing.h new file mode 100644 index 000000000000..609163eb0d77 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/simd/packing.h @@ -0,0 +1,8 @@ +/// @ref simd +/// @file glm/simd/packing.h + +#pragma once + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/simd/platform.h b/thirdparty/manifold/thirdparty/glm/glm/simd/platform.h new file mode 100644 index 000000000000..a318b098f9cf --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/simd/platform.h @@ -0,0 +1,469 @@ +#pragma once + +/////////////////////////////////////////////////////////////////////////////////// +// Platform + +#define GLM_PLATFORM_UNKNOWN 0x00000000 +#define GLM_PLATFORM_WINDOWS 0x00010000 +#define GLM_PLATFORM_LINUX 0x00020000 +#define GLM_PLATFORM_APPLE 0x00040000 +//#define GLM_PLATFORM_IOS 0x00080000 +#define GLM_PLATFORM_ANDROID 0x00100000 +#define GLM_PLATFORM_CHROME_NACL 0x00200000 +#define GLM_PLATFORM_UNIX 0x00400000 +#define GLM_PLATFORM_QNXNTO 0x00800000 +#define GLM_PLATFORM_WINCE 0x01000000 +#define GLM_PLATFORM_CYGWIN 0x02000000 + +#ifdef GLM_FORCE_PLATFORM_UNKNOWN +# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN +#elif defined(__CYGWIN__) +# define GLM_PLATFORM GLM_PLATFORM_CYGWIN +#elif defined(__QNXNTO__) +# define GLM_PLATFORM GLM_PLATFORM_QNXNTO +#elif defined(__APPLE__) +# define GLM_PLATFORM GLM_PLATFORM_APPLE +#elif defined(WINCE) +# define GLM_PLATFORM GLM_PLATFORM_WINCE +#elif defined(_WIN32) +# define GLM_PLATFORM GLM_PLATFORM_WINDOWS +#elif defined(__native_client__) +# define GLM_PLATFORM GLM_PLATFORM_CHROME_NACL +#elif defined(__ANDROID__) +# define GLM_PLATFORM GLM_PLATFORM_ANDROID +#elif defined(__linux) +# define GLM_PLATFORM GLM_PLATFORM_LINUX +#elif defined(__unix) +# define GLM_PLATFORM GLM_PLATFORM_UNIX +#else +# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN +#endif// + +/////////////////////////////////////////////////////////////////////////////////// +// Compiler + +#define GLM_COMPILER_UNKNOWN 0x00000000 + +// Intel +#define GLM_COMPILER_INTEL 0x00100000 +#define GLM_COMPILER_INTEL14 0x00100040 +#define GLM_COMPILER_INTEL15 0x00100050 +#define GLM_COMPILER_INTEL16 0x00100060 +#define GLM_COMPILER_INTEL17 0x00100070 +#define GLM_COMPILER_INTEL18 0x00100080 +#define GLM_COMPILER_INTEL19 0x00100090 +#define GLM_COMPILER_INTEL21 0x001000A0 + +// Visual C++ defines +#define GLM_COMPILER_VC 0x01000000 +#define GLM_COMPILER_VC12 0x01000001 // Visual Studio 2013 +#define GLM_COMPILER_VC14 0x01000002 // Visual Studio 2015 +#define GLM_COMPILER_VC15 0x01000003 // Visual Studio 2017 +#define GLM_COMPILER_VC15_3 0x01000004 +#define GLM_COMPILER_VC15_5 0x01000005 +#define GLM_COMPILER_VC15_6 0x01000006 +#define GLM_COMPILER_VC15_7 0x01000007 +#define GLM_COMPILER_VC15_8 0x01000008 +#define GLM_COMPILER_VC15_9 0x01000009 +#define GLM_COMPILER_VC16 0x0100000A // Visual Studio 2019 +#define GLM_COMPILER_VC17 0x0100000B // Visual Studio 2022 
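+// Each GLM_COMPILER_* value combines a family flag in its upper bits with a release ordinal in its lower bits, so masks such as (GLM_COMPILER & GLM_COMPILER_VC) detect a compiler family regardless of version.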
+ +// GCC defines +#define GLM_COMPILER_GCC 0x02000000 +#define GLM_COMPILER_GCC46 0x020000D0 +#define GLM_COMPILER_GCC47 0x020000E0 +#define GLM_COMPILER_GCC48 0x020000F0 +#define GLM_COMPILER_GCC49 0x02000100 +#define GLM_COMPILER_GCC5 0x02000200 +#define GLM_COMPILER_GCC6 0x02000300 +#define GLM_COMPILER_GCC61 0x02000800 +#define GLM_COMPILER_GCC7 0x02000400 +#define GLM_COMPILER_GCC8 0x02000500 +#define GLM_COMPILER_GCC9 0x02000600 +#define GLM_COMPILER_GCC10 0x02000700 +#define GLM_COMPILER_GCC11 0x02000800 +#define GLM_COMPILER_GCC12 0x02000900 +#define GLM_COMPILER_GCC13 0x02000A00 +#define GLM_COMPILER_GCC14 0x02000B00 + +// CUDA +#define GLM_COMPILER_CUDA 0x10000000 +#define GLM_COMPILER_CUDA75 0x10000001 +#define GLM_COMPILER_CUDA80 0x10000002 +#define GLM_COMPILER_CUDA90 0x10000004 +#define GLM_COMPILER_CUDA_RTC 0x10000100 + +// Clang +#define GLM_COMPILER_CLANG 0x20000000 +#define GLM_COMPILER_CLANG34 0x20000050 +#define GLM_COMPILER_CLANG35 0x20000060 +#define GLM_COMPILER_CLANG36 0x20000070 +#define GLM_COMPILER_CLANG37 0x20000080 +#define GLM_COMPILER_CLANG38 0x20000090 +#define GLM_COMPILER_CLANG39 0x200000A0 +#define GLM_COMPILER_CLANG4 0x200000B0 +#define GLM_COMPILER_CLANG5 0x200000C0 +#define GLM_COMPILER_CLANG6 0x200000D0 +#define GLM_COMPILER_CLANG7 0x200000E0 +#define GLM_COMPILER_CLANG8 0x200000F0 +#define GLM_COMPILER_CLANG9 0x20000100 +#define GLM_COMPILER_CLANG10 0x20000200 +#define GLM_COMPILER_CLANG11 0x20000300 +#define GLM_COMPILER_CLANG12 0x20000400 +#define GLM_COMPILER_CLANG13 0x20000500 +#define GLM_COMPILER_CLANG14 0x20000600 +#define GLM_COMPILER_CLANG15 0x20000700 +#define GLM_COMPILER_CLANG16 0x20000800 +#define GLM_COMPILER_CLANG17 0x20000900 +#define GLM_COMPILER_CLANG18 0x20000A00 +#define GLM_COMPILER_CLANG19 0x20000B00 + +// HIP +#define GLM_COMPILER_HIP 0x40000000 + +// Build model +#define GLM_MODEL_32 0x00000010 +#define GLM_MODEL_64 0x00000020 + +// Force generic C++ compiler +#ifdef GLM_FORCE_COMPILER_UNKNOWN +# define GLM_COMPILER GLM_COMPILER_UNKNOWN + +#elif defined(__INTEL_COMPILER) +# if __INTEL_COMPILER >= 2021 +# define GLM_COMPILER GLM_COMPILER_INTEL21 +# elif __INTEL_COMPILER >= 1900 +# define GLM_COMPILER GLM_COMPILER_INTEL19 +# elif __INTEL_COMPILER >= 1800 +# define GLM_COMPILER GLM_COMPILER_INTEL18 +# elif __INTEL_COMPILER >= 1700 +# define GLM_COMPILER GLM_COMPILER_INTEL17 +# elif __INTEL_COMPILER >= 1600 +# define GLM_COMPILER GLM_COMPILER_INTEL16 +# elif __INTEL_COMPILER >= 1500 +# define GLM_COMPILER GLM_COMPILER_INTEL15 +# elif __INTEL_COMPILER >= 1400 +# define GLM_COMPILER GLM_COMPILER_INTEL14 +# elif __INTEL_COMPILER < 1400 +# error "GLM requires ICC 2013 SP1 or newer" +# endif + +// CUDA +#elif defined(__CUDACC__) +# if !defined(CUDA_VERSION) && !defined(GLM_FORCE_CUDA) +# include <cuda.h> // make sure version is defined since nvcc does not define it itself!
+# endif +# if defined(__CUDACC_RTC__) +# define GLM_COMPILER GLM_COMPILER_CUDA_RTC +# elif CUDA_VERSION >= 8000 +# define GLM_COMPILER GLM_COMPILER_CUDA80 +# elif CUDA_VERSION >= 7500 +# define GLM_COMPILER GLM_COMPILER_CUDA75 +# elif CUDA_VERSION >= 7000 +# define GLM_COMPILER GLM_COMPILER_CUDA70 +# elif CUDA_VERSION < 7000 +# error "GLM requires CUDA 7.0 or higher" +# endif + +// HIP +#elif defined(__HIP__) +# define GLM_COMPILER GLM_COMPILER_HIP + +// Clang +#elif defined(__clang__) +# if defined(__apple_build_version__) +# if (__clang_major__ < 6) +# error "GLM requires Clang 3.4 / Apple Clang 6.0 or higher" +# elif __clang_major__ == 6 && __clang_minor__ == 0 +# define GLM_COMPILER GLM_COMPILER_CLANG35 +# elif __clang_major__ == 6 && __clang_minor__ >= 1 +# define GLM_COMPILER GLM_COMPILER_CLANG36 +# elif __clang_major__ >= 7 +# define GLM_COMPILER GLM_COMPILER_CLANG37 +# endif +# else +# if ((__clang_major__ == 3) && (__clang_minor__ < 4)) || (__clang_major__ < 3) +# error "GLM requires Clang 3.4 or higher" +# elif __clang_major__ == 3 && __clang_minor__ == 4 +# define GLM_COMPILER GLM_COMPILER_CLANG34 +# elif __clang_major__ == 3 && __clang_minor__ == 5 +# define GLM_COMPILER GLM_COMPILER_CLANG35 +# elif __clang_major__ == 3 && __clang_minor__ == 6 +# define GLM_COMPILER GLM_COMPILER_CLANG36 +# elif __clang_major__ == 3 && __clang_minor__ == 7 +# define GLM_COMPILER GLM_COMPILER_CLANG37 +# elif __clang_major__ == 3 && __clang_minor__ == 8 +# define GLM_COMPILER GLM_COMPILER_CLANG38 +# elif __clang_major__ == 3 && __clang_minor__ >= 9 +# define GLM_COMPILER GLM_COMPILER_CLANG39 +# elif __clang_major__ == 4 && __clang_minor__ == 0 +# define GLM_COMPILER GLM_COMPILER_CLANG4 +# elif __clang_major__ == 5 +# define GLM_COMPILER GLM_COMPILER_CLANG5 +# elif __clang_major__ == 6 +# define GLM_COMPILER GLM_COMPILER_CLANG6 +# elif __clang_major__ == 7 +# define GLM_COMPILER GLM_COMPILER_CLANG7 +# elif __clang_major__ == 8 +# define GLM_COMPILER GLM_COMPILER_CLANG8 +# elif __clang_major__ == 9 +# define GLM_COMPILER GLM_COMPILER_CLANG9 +# elif __clang_major__ == 10 +# define GLM_COMPILER GLM_COMPILER_CLANG10 +# elif __clang_major__ == 11 +# define GLM_COMPILER GLM_COMPILER_CLANG11 +# elif __clang_major__ == 12 +# define GLM_COMPILER GLM_COMPILER_CLANG12 +# elif __clang_major__ == 13 +# define GLM_COMPILER GLM_COMPILER_CLANG13 +# elif __clang_major__ == 14 +# define GLM_COMPILER GLM_COMPILER_CLANG14 +# elif __clang_major__ == 15 +# define GLM_COMPILER GLM_COMPILER_CLANG15 +# elif __clang_major__ == 16 +# define GLM_COMPILER GLM_COMPILER_CLANG16 +# elif __clang_major__ == 17 +# define GLM_COMPILER GLM_COMPILER_CLANG17 +# elif __clang_major__ == 18 +# define GLM_COMPILER GLM_COMPILER_CLANG18 +# elif __clang_major__ >= 19 +# define GLM_COMPILER GLM_COMPILER_CLANG19 +# endif +# endif + +// Visual C++ +#elif defined(_MSC_VER) +# if _MSC_VER >= 1930 +# define GLM_COMPILER GLM_COMPILER_VC17 +# elif _MSC_VER >= 1920 +# define GLM_COMPILER GLM_COMPILER_VC16 +# elif _MSC_VER >= 1916 +# define GLM_COMPILER GLM_COMPILER_VC15_9 +# elif _MSC_VER >= 1915 +# define GLM_COMPILER GLM_COMPILER_VC15_8 +# elif _MSC_VER >= 1914 +# define GLM_COMPILER GLM_COMPILER_VC15_7 +# elif _MSC_VER >= 1913 +# define GLM_COMPILER GLM_COMPILER_VC15_6 +# elif _MSC_VER >= 1912 +# define GLM_COMPILER GLM_COMPILER_VC15_5 +# elif _MSC_VER >= 1911 +# define GLM_COMPILER GLM_COMPILER_VC15_3 +# elif _MSC_VER >= 1910 +# define GLM_COMPILER GLM_COMPILER_VC15 +# elif _MSC_VER >= 1900 +# define GLM_COMPILER GLM_COMPILER_VC14 +# elif _MSC_VER 
>= 1800 +# define GLM_COMPILER GLM_COMPILER_VC12 +# elif _MSC_VER < 1800 +# error "GLM requires Visual C++ 12 - 2013 or higher" +# endif//_MSC_VER + +// G++ +#elif defined(__GNUC__) || defined(__MINGW32__) +# if __GNUC__ >= 14 +# define GLM_COMPILER GLM_COMPILER_GCC14 +# elif __GNUC__ >= 13 +# define GLM_COMPILER GLM_COMPILER_GCC13 +# elif __GNUC__ >= 12 +# define GLM_COMPILER GLM_COMPILER_GCC12 +# elif __GNUC__ >= 11 +# define GLM_COMPILER GLM_COMPILER_GCC11 +# elif __GNUC__ >= 10 +# define GLM_COMPILER GLM_COMPILER_GCC10 +# elif __GNUC__ >= 9 +# define GLM_COMPILER GLM_COMPILER_GCC9 +# elif __GNUC__ >= 8 +# define GLM_COMPILER GLM_COMPILER_GCC8 +# elif __GNUC__ >= 7 +# define GLM_COMPILER GLM_COMPILER_GCC7 +# elif __GNUC__ >= 6 +# define GLM_COMPILER GLM_COMPILER_GCC6 +# elif __GNUC__ >= 5 +# define GLM_COMPILER GLM_COMPILER_GCC5 +# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 +# define GLM_COMPILER GLM_COMPILER_GCC49 +# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 8 +# define GLM_COMPILER GLM_COMPILER_GCC48 +# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 7 +# define GLM_COMPILER GLM_COMPILER_GCC47 +# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 6 +# define GLM_COMPILER GLM_COMPILER_GCC46 +# elif ((__GNUC__ == 4) && (__GNUC_MINOR__ < 6)) || (__GNUC__ < 4) +# error "GLM requires GCC 4.6 or higher" +# endif + +#else +# define GLM_COMPILER GLM_COMPILER_UNKNOWN +#endif + +#ifndef GLM_COMPILER +# error "GLM_COMPILER undefined, your compiler may not be supported by GLM. Add #define GLM_COMPILER 0 to ignore this message." +#endif//GLM_COMPILER + +/////////////////////////////////////////////////////////////////////////////////// +// Instruction sets + +// User defines: GLM_FORCE_PURE GLM_FORCE_INTRINSICS GLM_FORCE_SSE2 GLM_FORCE_SSE3 GLM_FORCE_AVX GLM_FORCE_AVX2 GLM_FORCE_AVX2 + +#define GLM_ARCH_MIPS_BIT (0x10000000) +#define GLM_ARCH_PPC_BIT (0x20000000) +#define GLM_ARCH_ARM_BIT (0x40000000) +#define GLM_ARCH_ARMV8_BIT (0x01000000) +#define GLM_ARCH_X86_BIT (0x80000000) + +#define GLM_ARCH_SIMD_BIT (0x00001000) + +#define GLM_ARCH_NEON_BIT (0x00000001) +#define GLM_ARCH_SSE_BIT (0x00000002) +#define GLM_ARCH_SSE2_BIT (0x00000004) +#define GLM_ARCH_SSE3_BIT (0x00000008) +#define GLM_ARCH_SSSE3_BIT (0x00000010) +#define GLM_ARCH_SSE41_BIT (0x00000020) +#define GLM_ARCH_SSE42_BIT (0x00000040) +#define GLM_ARCH_AVX_BIT (0x00000080) +#define GLM_ARCH_AVX2_BIT (0x00000100) + +#define GLM_ARCH_UNKNOWN (0) +#define GLM_ARCH_X86 (GLM_ARCH_X86_BIT) +#define GLM_ARCH_SSE (GLM_ARCH_SSE_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_X86) +#define GLM_ARCH_SSE2 (GLM_ARCH_SSE2_BIT | GLM_ARCH_SSE) +#define GLM_ARCH_SSE3 (GLM_ARCH_SSE3_BIT | GLM_ARCH_SSE2) +#define GLM_ARCH_SSSE3 (GLM_ARCH_SSSE3_BIT | GLM_ARCH_SSE3) +#define GLM_ARCH_SSE41 (GLM_ARCH_SSE41_BIT | GLM_ARCH_SSSE3) +#define GLM_ARCH_SSE42 (GLM_ARCH_SSE42_BIT | GLM_ARCH_SSE41) +#define GLM_ARCH_AVX (GLM_ARCH_AVX_BIT | GLM_ARCH_SSE42) +#define GLM_ARCH_AVX2 (GLM_ARCH_AVX2_BIT | GLM_ARCH_AVX) +#define GLM_ARCH_ARM (GLM_ARCH_ARM_BIT) +#define GLM_ARCH_ARMV8 (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM | GLM_ARCH_ARMV8_BIT) +#define GLM_ARCH_NEON (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM) +#define GLM_ARCH_MIPS (GLM_ARCH_MIPS_BIT) +#define GLM_ARCH_PPC (GLM_ARCH_PPC_BIT) + +#if defined(GLM_FORCE_ARCH_UNKNOWN) || defined(GLM_FORCE_PURE) +# define GLM_ARCH GLM_ARCH_UNKNOWN +#elif defined(GLM_FORCE_NEON) +# if __ARM_ARCH >= 8 +# define GLM_ARCH (GLM_ARCH_ARMV8) +# else +# define GLM_ARCH (GLM_ARCH_NEON) +# endif +# define GLM_FORCE_INTRINSICS +#elif 
defined(GLM_FORCE_AVX2) +# define GLM_ARCH (GLM_ARCH_AVX2) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_AVX) +# define GLM_ARCH (GLM_ARCH_AVX) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSE42) +# define GLM_ARCH (GLM_ARCH_SSE42) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSE41) +# define GLM_ARCH (GLM_ARCH_SSE41) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSSE3) +# define GLM_ARCH (GLM_ARCH_SSSE3) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSE3) +# define GLM_ARCH (GLM_ARCH_SSE3) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSE2) +# define GLM_ARCH (GLM_ARCH_SSE2) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSE) +# define GLM_ARCH (GLM_ARCH_SSE) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_INTRINSICS) && !defined(GLM_FORCE_XYZW_ONLY) +# if defined(__AVX2__) +# define GLM_ARCH (GLM_ARCH_AVX2) +# elif defined(__AVX__) +# define GLM_ARCH (GLM_ARCH_AVX) +# elif defined(__SSE4_2__) +# define GLM_ARCH (GLM_ARCH_SSE42) +# elif defined(__SSE4_1__) +# define GLM_ARCH (GLM_ARCH_SSE41) +# elif defined(__SSSE3__) +# define GLM_ARCH (GLM_ARCH_SSSE3) +# elif defined(__SSE3__) +# define GLM_ARCH (GLM_ARCH_SSE3) +# elif defined(__SSE2__) || defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86_FP) +# define GLM_ARCH (GLM_ARCH_SSE2) +# elif defined(__i386__) +# define GLM_ARCH (GLM_ARCH_X86) +# elif defined(__ARM_ARCH) && (__ARM_ARCH >= 8) +# define GLM_ARCH (GLM_ARCH_ARMV8) +# elif defined(__ARM_NEON) +# define GLM_ARCH (GLM_ARCH_ARM | GLM_ARCH_NEON) +# elif defined(__arm__ ) || defined(_M_ARM) +# define GLM_ARCH (GLM_ARCH_ARM) +# elif defined(__mips__ ) +# define GLM_ARCH (GLM_ARCH_MIPS) +# elif defined(__powerpc__ ) || defined(_M_PPC) +# define GLM_ARCH (GLM_ARCH_PPC) +# else +# define GLM_ARCH (GLM_ARCH_UNKNOWN) +# endif +#else +# if defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86) || defined(__i386__) +# define GLM_ARCH (GLM_ARCH_X86) +# elif defined(__arm__) || defined(_M_ARM) +# define GLM_ARCH (GLM_ARCH_ARM) +# elif defined(__powerpc__) || defined(_M_PPC) +# define GLM_ARCH (GLM_ARCH_PPC) +# elif defined(__mips__) +# define GLM_ARCH (GLM_ARCH_MIPS) +# else +# define GLM_ARCH (GLM_ARCH_UNKNOWN) +# endif +#endif + +#if GLM_ARCH & GLM_ARCH_AVX2_BIT +# include <immintrin.h> +#elif GLM_ARCH & GLM_ARCH_AVX_BIT +# include <immintrin.h> +#elif GLM_ARCH & GLM_ARCH_SSE42_BIT +# if GLM_COMPILER & GLM_COMPILER_CLANG +# include <popcntintrin.h> +# endif +# include <nmmintrin.h> +#elif GLM_ARCH & GLM_ARCH_SSE41_BIT +# include <smmintrin.h> +#elif GLM_ARCH & GLM_ARCH_SSSE3_BIT +# include <tmmintrin.h> +#elif GLM_ARCH & GLM_ARCH_SSE3_BIT +# include <pmmintrin.h> +#elif GLM_ARCH & GLM_ARCH_SSE2_BIT +# include <emmintrin.h> +#elif GLM_ARCH & GLM_ARCH_NEON_BIT +# include "neon.h" +#endif//GLM_ARCH + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + typedef __m128 glm_f32vec4; + typedef __m128i glm_i32vec4; + typedef __m128i glm_u32vec4; + typedef __m128d glm_f64vec2; + typedef __m128i glm_i64vec2; + typedef __m128i glm_u64vec2; + + typedef glm_f32vec4 glm_vec4; + typedef glm_i32vec4 glm_ivec4; + typedef glm_u32vec4 glm_uvec4; + typedef glm_f64vec2 glm_dvec2; +#endif + +#if GLM_ARCH & GLM_ARCH_AVX_BIT + typedef __m256d glm_f64vec4; + typedef glm_f64vec4 glm_dvec4; +#endif + +#if GLM_ARCH & GLM_ARCH_AVX2_BIT + typedef __m256i glm_i64vec4; + typedef __m256i glm_u64vec4; +#endif + +#if GLM_ARCH & GLM_ARCH_NEON_BIT + typedef float32x4_t glm_f32vec4; + typedef int32x4_t glm_i32vec4; + typedef uint32x4_t glm_u32vec4; +#endif
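The GLM_ARCH values above form a cumulative bitmask: each instruction-set tier ORs its own bit together with every tier it implies (GLM_ARCH_AVX2 contains all of the SSE bits, for example), so a single bitwise AND answers "is at least this level available". A minimal sketch of how client code can branch on the selected tier (illustrative only, not part of the patch; it assumes the vendored glm/ directory is on the include path):

```cpp
// Sketch: report which SIMD tier GLM's platform detection selected.
// Assumes <glm/simd/platform.h> (the file patched above) is on the include path.
#include <cstdio>
#include <glm/simd/platform.h>

int main() {
    // Because the tiers are cumulative, testing one bit also matches every
    // higher tier that includes it (e.g. GLM_ARCH_AVX sets GLM_ARCH_SSE42_BIT).
#if GLM_ARCH & GLM_ARCH_AVX2_BIT
    std::puts("AVX2 code paths");
#elif GLM_ARCH & GLM_ARCH_SSE2_BIT
    std::puts("SSE2 (or a higher SSE/AVX tier) code paths");
#elif GLM_ARCH & GLM_ARCH_NEON_BIT
    std::puts("NEON code paths");
#else
    std::puts("pure scalar code paths");
#endif
    return 0;
}
```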
diff --git a/thirdparty/manifold/thirdparty/glm/glm/simd/trigonometric.h b/thirdparty/manifold/thirdparty/glm/glm/simd/trigonometric.h new file mode 100644 index 000000000000..739b796e7e45 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/simd/trigonometric.h @@ -0,0 +1,9 @@ +/// @ref simd +/// @file glm/simd/trigonometric.h + +#pragma once + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT + diff --git a/thirdparty/manifold/thirdparty/glm/glm/simd/vector_relational.h b/thirdparty/manifold/thirdparty/glm/glm/simd/vector_relational.h new file mode 100644 index 000000000000..f7385e974736 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/simd/vector_relational.h @@ -0,0 +1,8 @@ +/// @ref simd +/// @file glm/simd/vector_relational.h + +#pragma once + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/thirdparty/manifold/thirdparty/glm/glm/trigonometric.hpp b/thirdparty/manifold/thirdparty/glm/glm/trigonometric.hpp new file mode 100644 index 000000000000..51d49c132bb5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/trigonometric.hpp @@ -0,0 +1,210 @@ +/// @ref core +/// @file glm/trigonometric.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions +/// +/// @defgroup core_func_trigonometric Angle and Trigonometry Functions +/// @ingroup core +/// +/// Function parameters specified as angle are assumed to be in units of radians. +/// In no case will any of these functions result in a divide by zero error. If +/// the divisor of a ratio is 0, then results will be undefined. +/// +/// These all operate component-wise. The description is per component. +/// +/// Include <glm/trigonometric.hpp> to use these core features. +/// +/// @see ext_vector_trigonometric + +#pragma once + +#include "detail/setup.hpp" +#include "detail/qualifier.hpp" + +namespace glm +{ + /// @addtogroup core_func_trigonometric + /// @{ + + /// Converts degrees to radians and returns the result. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL radians man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> radians(vec<L, T, Q> const& degrees); + + /// Converts radians to degrees and returns the result. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL degrees man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> degrees(vec<L, T, Q> const& radians); + + /// The standard trigonometric sine function. + /// The values returned by this function will range from [-1, 1]. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL sin man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> sin(vec<L, T, Q> const& angle); + + /// The standard trigonometric cosine function. + /// The values returned by this function will range from [-1, 1].
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL cos man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> cos(vec<L, T, Q> const& angle); + + /// The standard trigonometric tangent function. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL tan man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> tan(vec<L, T, Q> const& angle); + + /// Arc sine. Returns an angle whose sine is x. + /// The range of values returned by this function is [-PI/2, PI/2]. + /// Results are undefined if |x| > 1. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL asin man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> asin(vec<L, T, Q> const& x); + + /// Arc cosine. Returns an angle whose cosine is x. + /// The range of values returned by this function is [0, PI]. + /// Results are undefined if |x| > 1. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL acos man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> acos(vec<L, T, Q> const& x); + + /// Arc tangent. Returns an angle whose tangent is y/x. + /// The signs of x and y are used to determine what + /// quadrant the angle is in. The range of values returned + /// by this function is [-PI, PI]. Results are undefined + /// if x and y are both 0. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL atan man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> atan(vec<L, T, Q> const& y, vec<L, T, Q> const& x); + + /// Arc tangent. Returns an angle whose tangent is y_over_x. + /// The range of values returned by this function is [-PI/2, PI/2].
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL atan man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> atan(vec<L, T, Q> const& y_over_x); + + /// Returns the hyperbolic sine function, (exp(x) - exp(-x)) / 2 + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL sinh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> sinh(vec<L, T, Q> const& angle); + + /// Returns the hyperbolic cosine function, (exp(x) + exp(-x)) / 2 + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL cosh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> cosh(vec<L, T, Q> const& angle); + + /// Returns the hyperbolic tangent function, sinh(angle) / cosh(angle) + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL tanh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> tanh(vec<L, T, Q> const& angle); + + /// Arc hyperbolic sine; returns the inverse of sinh. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL asinh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> asinh(vec<L, T, Q> const& x); + + /// Arc hyperbolic cosine; returns the non-negative inverse + /// of cosh. Results are undefined if x < 1. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL acosh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> acosh(vec<L, T, Q> const& x); + + /// Arc hyperbolic tangent; returns the inverse of tanh. + /// Results are undefined if abs(x) >= 1. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL atanh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> atanh(vec<L, T, Q> const& x); + + /// @} +}//namespace glm + +#include "detail/func_trigonometric.inl"
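All of these functions apply per component, so one call covers every lane of a vector. A small usage sketch (illustrative only, not part of the patch):

```cpp
// Sketch of the component-wise trigonometric API declared above.
#include <cstdio>
#include <glm/vec3.hpp>
#include <glm/trigonometric.hpp>

int main() {
    glm::vec3 deg(0.0f, 90.0f, 180.0f);
    glm::vec3 rad = glm::radians(deg); // per-component degrees -> radians
    glm::vec3 s = glm::sin(rad);       // per-component sine: roughly (0, 1, 0)
    std::printf("sin = (%f, %f, %f)\n", s.x, s.y, s.z);
    return 0;
}
```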
diff --git a/thirdparty/manifold/thirdparty/glm/glm/vec2.hpp b/thirdparty/manifold/thirdparty/glm/glm/vec2.hpp new file mode 100644 index 000000000000..cd4e0708e109 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/vec2.hpp @@ -0,0 +1,14 @@ +/// @ref core +/// @file glm/vec2.hpp + +#pragma once +#include "./ext/vector_bool2.hpp" +#include "./ext/vector_bool2_precision.hpp" +#include "./ext/vector_float2.hpp" +#include "./ext/vector_float2_precision.hpp" +#include "./ext/vector_double2.hpp" +#include "./ext/vector_double2_precision.hpp" +#include "./ext/vector_int2.hpp" +#include "./ext/vector_int2_sized.hpp" +#include "./ext/vector_uint2.hpp" +#include "./ext/vector_uint2_sized.hpp" diff --git a/thirdparty/manifold/thirdparty/glm/glm/vec3.hpp b/thirdparty/manifold/thirdparty/glm/glm/vec3.hpp new file mode 100644 index 000000000000..f5a927dbe4c5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/vec3.hpp @@ -0,0 +1,14 @@ +/// @ref core +/// @file glm/vec3.hpp + +#pragma once +#include "./ext/vector_bool3.hpp" +#include "./ext/vector_bool3_precision.hpp" +#include "./ext/vector_float3.hpp" +#include "./ext/vector_float3_precision.hpp" +#include "./ext/vector_double3.hpp" +#include "./ext/vector_double3_precision.hpp" +#include "./ext/vector_int3.hpp" +#include "./ext/vector_int3_sized.hpp" +#include "./ext/vector_uint3.hpp" +#include "./ext/vector_uint3_sized.hpp" diff --git a/thirdparty/manifold/thirdparty/glm/glm/vec4.hpp b/thirdparty/manifold/thirdparty/glm/glm/vec4.hpp new file mode 100644 index 000000000000..c6ea9f1ff4c5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/vec4.hpp @@ -0,0 +1,15 @@ +/// @ref core +/// @file glm/vec4.hpp + +#pragma once +#include "./ext/vector_bool4.hpp" +#include "./ext/vector_bool4_precision.hpp" +#include "./ext/vector_float4.hpp" +#include "./ext/vector_float4_precision.hpp" +#include "./ext/vector_double4.hpp" +#include "./ext/vector_double4_precision.hpp" +#include "./ext/vector_int4.hpp" +#include "./ext/vector_int4_sized.hpp" +#include "./ext/vector_uint4.hpp" +#include "./ext/vector_uint4_sized.hpp" + diff --git a/thirdparty/manifold/thirdparty/glm/glm/vector_relational.hpp b/thirdparty/manifold/thirdparty/glm/glm/vector_relational.hpp new file mode 100644 index 000000000000..a0fe17eb707a --- /dev/null +++ b/thirdparty/manifold/thirdparty/glm/glm/vector_relational.hpp @@ -0,0 +1,121 @@ +/// @ref core +/// @file glm/vector_relational.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions +/// +/// @defgroup core_func_vector_relational Vector Relational Functions +/// @ingroup core +/// +/// Relational and equality operators (<, <=, >, >=, ==, !=) are defined to +/// operate on scalars and produce scalar Boolean results. For vector results, +/// use the following built-in functions. +/// +/// In all cases, the sizes of all the input and return vectors for any particular +/// call must match. +/// +/// Include <glm/vector_relational.hpp> to use these core features.
+/// +/// @see ext_vector_relational + +#pragma once + +#include "detail/qualifier.hpp" +#include "detail/setup.hpp" + +namespace glm +{ + /// @addtogroup core_func_vector_relational + /// @{ + + /// Returns the component-wise comparison result of x < y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point or integer scalar type. + /// + /// @see GLSL lessThan man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> lessThan(vec<L, T, Q> const& x, vec<L, T, Q> const& y); + + /// Returns the component-wise comparison of result x <= y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point or integer scalar type. + /// + /// @see GLSL lessThanEqual man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> lessThanEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y); + + /// Returns the component-wise comparison of result x > y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point or integer scalar type. + /// + /// @see GLSL greaterThan man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> greaterThan(vec<L, T, Q> const& x, vec<L, T, Q> const& y); + + /// Returns the component-wise comparison of result x >= y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point or integer scalar type. + /// + /// @see GLSL greaterThanEqual man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> greaterThanEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y); + + /// Returns the component-wise comparison of result x == y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point, integer or bool scalar type. + /// + /// @see GLSL equal man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y); + + /// Returns the component-wise comparison of result x != y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point, integer or bool scalar type. + /// + /// @see GLSL notEqual man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y); + + /// Returns true if any component of x is true. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// + /// @see GLSL any man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template<length_t L, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR bool any(vec<L, bool, Q> const& v); + + /// Returns true if all components of x are true. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// + /// @see GLSL all man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template<length_t L, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR bool all(vec<L, bool, Q> const& v); + + /// Returns the component-wise logical complement of x. + /// /!\ Because of language incompatibilities between C++ and GLSL, GLM defines the function not but not_ instead. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// + /// @see GLSL not man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template<length_t L, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> not_(vec<L, bool, Q> const& v); + + /// @} +}//namespace glm + +#include "detail/func_vector_relational.inl"
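Each comparison above yields a boolean vector (one flag per component) rather than a single bool; any() and all() reduce that mask, and not_() complements it (renamed because `not` is a C++ keyword). A small usage sketch (illustrative only, not part of the patch):

```cpp
// Sketch of the vector relational API declared above.
#include <cstdio>
#include <glm/vec3.hpp>
#include <glm/vector_relational.hpp>

int main() {
    glm::vec3 a(1.0f, 5.0f, 3.0f);
    glm::vec3 b(2.0f, 4.0f, 3.0f);
    glm::bvec3 mask = glm::lessThan(a, b); // (true, false, false)
    std::printf("any: %d, all: %d\n", glm::any(mask), glm::all(mask));
    glm::bvec3 inv = glm::not_(mask);      // (false, true, true)
    std::printf("inverted.x: %d\n", inv.x);
    return 0;
}
```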
diff --git a/thirdparty/manifold/thirdparty/thrust/.gitrepo b/thirdparty/manifold/thirdparty/thrust/.gitrepo new file mode 100644 index 000000000000..f317a7ec8608 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/ingydotnet/git-subrepo#readme +; +[subrepo] + remote = https://github.com/NVIDIA/thrust.git + branch = 2.1.0 + commit = 3cd56842c94de4926157f6ccdfbbf03ef7e5d5dc + parent = 2150401aec548d8d3ca01554d59b2e3e113db527 + method = merge + cmdver = 0.4.6 diff --git a/thirdparty/manifold/thirdparty/thrust/LICENSE b/thirdparty/manifold/thirdparty/thrust/LICENSE new file mode 100644 index 000000000000..c22c225631e0 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/LICENSE @@ -0,0 +1,249 @@ +Unless otherwise noted, Thrust's source code is released under the Apache +License, Version 2.0: + +================================================================================ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship.
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +================================================================================ + +Some portions of Thrust may be licensed under other compatible open-source +licenses. Any divergence from the Apache 2 license will be noted in the source +code where applicable. + +Portions under other terms include, but are not limited to: + +================================================================================ + +Various C++ utility classes in Thrust are based on the Boost Iterator, Tuple, +System, and Random Number libraries, which are provided under the Boost Software +License: + + Boost Software License - Version 1.0 - August 17th, 2003 + + Permission is hereby granted, free of charge, to any person or organization + obtaining a copy of the software and accompanying documentation covered by + this license (the "Software") to use, reproduce, display, distribute, + execute, and transmit the Software, and to prepare derivative works of the + Software, and to permit third-parties to whom the Software is furnished to + do so, all subject to the following: + + The copyright notices in the Software and this entire statement, including + the above license grant, this restriction and the following disclaimer, + must be included in all copies of the Software, in whole or in part, and + all derivative works of the Software, unless such copies or derivative + works are solely in the form of machine-executable object code generated by + a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT + SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE + FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +================================================================================ + +Portions of the thrust::complex implementation are derived from FreeBSD with the +following terms: + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice[1] unmodified, this list of conditions, and the following + disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +[1] Individual copyright notices from the original authors are included in + the relevant source files. + +================================================================================ diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/.gitrepo b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/.gitrepo new file mode 100644 index 000000000000..b3ac4946d915 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/ingydotnet/git-subrepo#readme +; +[subrepo] + remote = https://github.com/NVIDIA/libcudacxx.git + branch = 55dd2c99346baa3a14949a0f7e9c41865e434eda + commit = 55dd2c99346baa3a14949a0f7e9c41865e434eda + parent = 2308846c6cc63451eee7932ca936b112aad00c4e + method = merge + cmdver = 0.4.6 diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/LICENSE.TXT b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/LICENSE.TXT new file mode 100644 index 000000000000..bef1adba4af2 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/LICENSE.TXT @@ -0,0 +1,368 @@ +============================================================================== +libcu++ is under the Apache License v2.0 with LLVM Exceptions: +============================================================================== + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +---- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. + +============================================================================== +Software from third parties included in the LLVM Project: +============================================================================== +The LLVM Project contains third party software which is under different license +terms. All such code will be identified clearly using at least one of two +mechanisms: +1) It will be in a separate directory tree with its own `LICENSE.txt` or + `LICENSE` file at the top containing the specific license and restrictions + which apply to that software, or +2) It will contain specific license and restriction terms at the top of every + file. + +============================================================================== +Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy): +============================================================================== + +The libc++ library is dual licensed under both the University of Illinois +"BSD-Like" license and the MIT license. As a user of this code you may choose +to use it under either license. As a contributor, you agree to allow your code +to be used under both. + +Full text of the relevant licenses is included below. + +============================================================================== + +University of Illinois/NCSA +Open Source License + +Copyright (c) 2009-2019 by the contributors listed in CREDITS.TXT + +All rights reserved. + +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. + +============================================================================== + +Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +============================================================================== + +Some libcudacxx components are covered by the below license. Each source file +indicates which license it is under. + +============================================================================== + +NVIDIA SOFTWARE LICENSE + +This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + +This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. + +You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. + +1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. + +2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: +a. 
The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. +b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. + +3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: +a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. +b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. +c. You may not modify or create derivative works of any portion of the SOFTWARE. +d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. +e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. +f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. +g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + +4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. + +5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. + +6. COMPONENTS UNDER OTHER LICENSES. 
The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. + +7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. + +8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + +9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + +10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. + +11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. 
The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + +12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. + +13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. + +14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + +15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. + +(v. August 20, 2021) + diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/annotated_ptr b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/annotated_ptr new file mode 100644 index 000000000000..fcb6e20d71ac --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/annotated_ptr @@ -0,0 +1,337 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * NVIDIA SOFTWARE LICENSE + * + * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + * + * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. 
If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. + * + * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. + * + * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. + * + * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: + * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. + * + * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: + * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. 
You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * + * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. + * + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. + * + * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. + * + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. + * + * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * + * 9. LIMITATIONS OF LIABILITY. 
TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + * + * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. + * + * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. + * + * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. 
and that you are not otherwise prohibited from receiving the SOFTWARE. + * + * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * + * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. + * + * (v. August 20, 2021) + */ + +#ifndef _CUDA_ANNOTATED_PTR +#define _CUDA_ANNOTATED_PTR + +#include +#include + +#include "std/detail/__access_property" + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + +class access_property { + private: + std::uint64_t __descriptor = 0; + + public: + struct shared {}; + struct global {}; + struct persisting { + __host__ __device__ constexpr operator cudaAccessProperty() const noexcept { + return cudaAccessProperty::cudaAccessPropertyPersisting; + } + }; + struct streaming { + __host__ __device__ constexpr operator cudaAccessProperty() const noexcept { + return cudaAccessProperty::cudaAccessPropertyStreaming; + } + }; + struct normal { + __host__ __device__ constexpr operator cudaAccessProperty() const noexcept { + return cudaAccessProperty::cudaAccessPropertyNormal; + } + }; + + __host__ __device__ constexpr access_property(global) noexcept : __descriptor(__detail_ap::__sm_80::__interleave_normal()) {} + __host__ __device__ constexpr access_property() noexcept : __descriptor(__detail_ap::__sm_80::__interleave_normal()) {} + constexpr access_property(access_property const&) noexcept = default; + access_property& operator=(const access_property& other) noexcept = default; + + __host__ __device__ constexpr access_property(normal, float __fraction) : __descriptor(__detail_ap::__interleave(normal{}, __fraction)) {} + __host__ __device__ constexpr access_property(streaming, float __fraction) : __descriptor(__detail_ap::__interleave(streaming{}, __fraction)) {} + __host__ __device__ constexpr access_property(persisting, float __fraction) : __descriptor(__detail_ap::__interleave(persisting{}, __fraction)) {} + __host__ __device__ constexpr access_property(normal, float __fraction, streaming) : __descriptor(__detail_ap::__interleave(normal{}, __fraction, streaming{})) {} + __host__ __device__ constexpr access_property(persisting, float __fraction, streaming) : __descriptor(__detail_ap::__interleave(persisting{}, __fraction, streaming{})) {} + + __host__ __device__ constexpr access_property(normal) noexcept : access_property(normal{}, 1.0) {} + __host__ __device__ constexpr access_property(streaming) noexcept : access_property(streaming{}, 1.0) {} + __host__ __device__ constexpr access_property(persisting) 
noexcept : access_property(persisting{}, 1.0) {}
+
+  __host__ __device__ constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, normal)
+    : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, normal{})) {}
+  __host__ __device__ constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, streaming)
+    : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, streaming{})) {}
+  __host__ __device__ constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, persisting)
+    : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, persisting{})) {}
+  __host__ __device__ constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, normal, streaming)
+    : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, normal{}, streaming{})) {}
+  __host__ __device__ constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, persisting, streaming)
+    : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, persisting{}, streaming{})) {}
+
+  __host__ __device__ constexpr explicit operator std::uint64_t() const noexcept { return __descriptor; }
+};
+
+_LIBCUDACXX_END_NAMESPACE_CUDA
+
+#include "std/detail/__annotated_ptr"
+
+_LIBCUDACXX_BEGIN_NAMESPACE_CUDA
+
+template<class _Tp, class _Property>
+__host__ __device__
+_Tp* associate_access_property(_Tp* __ptr, _Property __prop) {
+  static_assert(
+    std::is_same<_Property, access_property>::value ||
+    std::is_same<_Property, access_property::persisting>::value ||
+    std::is_same<_Property, access_property::streaming>::value ||
+    std::is_same<_Property, access_property::normal>::value ||
+    std::is_same<_Property, access_property::global>::value ||
+    std::is_same<_Property, access_property::shared>::value
+    , "property is not convertible to cuda::access_property");
+  return __detail_ap::__associate(__ptr, __prop);
+}
+
+template<class _Shape>
+__host__ __device__
+void apply_access_property(const volatile void* __ptr, const _Shape __shape, access_property::persisting __prop) noexcept {
+#if __CUDA_ARCH__ >= 800
+  if (!__isGlobal((void*)__ptr)) return;
+
+  char* __p = reinterpret_cast<char*>(const_cast<void*>(__ptr));
+  static constexpr std::size_t _LINE_SIZE = 128;
+  std::size_t __nbytes = static_cast<std::size_t>(__shape);
+  std::size_t __end = ((std::uintptr_t)(__p + __nbytes) % _LINE_SIZE) ? __nbytes + _LINE_SIZE : __nbytes;
+  __end /= _LINE_SIZE;
+
+  //Apply to all 128 bytes aligned cache lines inclusive of __p
+  for (std::size_t __i = 0; __i < __end; __i += _LINE_SIZE) {
+    asm volatile ("prefetch.global.L2::evict_last [%0];" ::"l"(__p + (__i * _LINE_SIZE)) :);
+  }
+#endif
+}
+
+template<class _Shape>
+__host__ __device__
+void apply_access_property(const volatile void* __ptr, const _Shape __shape, access_property::normal __prop) noexcept {
+#if __CUDA_ARCH__ >= 800
+  if (!__isGlobal((void*)__ptr)) return;
+
+  char* __p = reinterpret_cast<char*>(const_cast<void*>(__ptr));
+  static constexpr std::size_t _LINE_SIZE = 128;
+  std::size_t __nbytes = static_cast<std::size_t>(__shape);
+  std::size_t __end = ((std::uintptr_t)(__p + __nbytes) % _LINE_SIZE) ? __nbytes + _LINE_SIZE : __nbytes;
+  __end /= _LINE_SIZE;
+
+  //Apply to all 128 bytes aligned cache lines inclusive of __p
+  for (std::size_t __i = 0; __i < __end; __i += _LINE_SIZE) {
+    asm volatile ("prefetch.global.L2::evict_normal [%0];" ::"l"(__p + (__i * _LINE_SIZE)) :);
+  }
+#endif
+}
+
+inline
+__host__ __device__
+void discard_memory(volatile void* __ptr, std::size_t __nbytes) noexcept {
+#if __CUDA_ARCH__ >= 800
+  if (!__isGlobal((void*)__ptr)) return;
+
+  char* __p = reinterpret_cast<char*>(const_cast<void*>(__ptr));
+  static constexpr std::size_t _LINE_SIZE = 128;
+  std::size_t __start = (reinterpret_cast<std::uintptr_t>(__p) % _LINE_SIZE) ? 1 : 0;
+  std::size_t __end = (reinterpret_cast<std::uintptr_t>(__p + __nbytes) % _LINE_SIZE) ? __nbytes - _LINE_SIZE : __nbytes;
+  __end /= _LINE_SIZE;
+
+  //Trim the first block and last block if they're not 128 bytes aligned
+  for (std::size_t __i = __start; __i < __end; __i += _LINE_SIZE) {
+    asm volatile ("discard.global.L2 [%0], 128;" ::"l"(__p + (__i * _LINE_SIZE)) :);
+  }
+#endif
+}
+
+template<class _Tp, class _Property>
+class annotated_ptr: public __detail_ap::__annotated_ptr_base<_Property> {
+  public:
+    using value_type = _Tp;
+    using size_type = std::size_t;
+    using reference = value_type&;
+    using pointer = value_type*;
+    using const_pointer = value_type const*;
+    using difference_type = std::ptrdiff_t;
+
+  private:
+    using __self = annotated_ptr<_Tp, _Property>;
+
+    // Converting from a 64-bit to 32-bit shared pointer and maybe back just for storage might or might not be profitable.
+    pointer __repr = (pointer)((size_type)nullptr);
+
+    __host__ __device__ pointer __get(bool __skip_prop = false, difference_type __n = 0) const {
+#ifdef __CUDA_ARCH__
+      if (!__skip_prop) {
+        return static_cast<pointer>(this->__apply_prop(const_cast<void*>(static_cast<const void*>(__repr + __n))));
+      }
+#endif
+      return __repr + __n;
+    }
+    __host__ __device__ pointer __offset(difference_type __n, bool __skip_prop = false) const {
+      return __get(__skip_prop, __n);
+    }
+
+  public:
+    __host__ __device__ pointer operator->() const {
+      return __get();
+    }
+
+    __host__ __device__ reference operator*() const {
+      return *__get();
+    }
+
+    __host__ __device__ reference operator[](difference_type __n) const {
+      return *__offset(__n);
+    }
+
+    __host__ __device__ constexpr difference_type operator-(annotated_ptr o) const {
+      return __repr - o.__repr;
+    }
+
+    constexpr annotated_ptr() noexcept = default;
+    constexpr annotated_ptr(annotated_ptr const&) noexcept = default;
+    // No constexpr for c11 as the method can't be const
+    _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 annotated_ptr& operator=(annotated_ptr const& other) noexcept = default;
+
+    __host__ __device__ explicit annotated_ptr(pointer __p)
+      : __repr(__p)
+    {
+#if _LIBCUDACXX_DEBUG_LEVEL >= 2 && defined(__CUDA_ARCH__)
+      _LIBCUDACXX_DEBUG_ASSERT(std::is_same<_Property, shared>::value && __isShared(__p) || __isGlobal(__p));
+#endif
+    }
+
+    template<class _RuntimeProperty>
+    __host__ __device__ annotated_ptr(pointer __p, _RuntimeProperty __prop)
+      : __detail_ap::__annotated_ptr_base<_Property>(static_cast<std::uint64_t>(access_property(__prop))), __repr(__p)
+    {
+      static_assert(std::is_same<_Property, access_property>::value,
+                    "This method requires annotated_ptr");
+      static_assert(std::is_same<_RuntimeProperty, access_property::global>::value ||
+                    std::is_same<_RuntimeProperty, access_property::normal>::value ||
+                    std::is_same<_RuntimeProperty, access_property::streaming>::value ||
+                    std::is_same<_RuntimeProperty, access_property::persisting>::value ||
+                    std::is_same<_RuntimeProperty, access_property>::value,
+                    "This method requires 
RuntimeProperty=global|normal|streaming|persisting|access_property"); + +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 && defined(__CUDA_ARCH__) + _LIBCUDACXX_DEBUG_ASSERT(__isGlobal(__p) == true); +#endif + } + + template + __host__ __device__ annotated_ptr(const annotated_ptr<_TTp,_Prop>& __other); + + __host__ __device__ constexpr explicit operator bool() const noexcept { + return __repr != nullptr; + } + + __host__ __device__ pointer get() const noexcept { + constexpr bool __is_shared = std::is_same<_Property, access_property::shared>::value; + return __is_shared ? __repr : &(*annotated_ptr(__repr)); + } + + __host__ __device__ _Property __property() const noexcept { + return this->__get_property(); + } +}; + + +template +template +__host__ __device__ annotated_ptr<_Tp, _Property>::annotated_ptr(const annotated_ptr<_TTp,_Prop>& __other) + : __detail_ap::__annotated_ptr_base<_Property>(__other.__property()), __repr(__other.get()) +{ + static_assert(std::is_assignable::value, "pointer must be assignable from other pointer"); + static_assert((std::is_same<_Property, access_property>::value && !std::is_same<_Prop, access_property::shared>::value) || + std::is_same<_Property, _Prop>::value, "Property must be either access_property or other property, and both properties must have same address space"); + // note: precondition "__other.__rep must be compatible with _Property" currently always holds +} + +template +__host__ __device__ +void memcpy_async(_Dst* __dst, + annotated_ptr<_Src,_SrcProperty> __src, + _Shape __shape, _Sync & __sync) { + memcpy_async(__dst, &(*__src), __shape, __sync); +} + +template +__host__ __device__ +void memcpy_async(annotated_ptr<_Dst,_DstProperty> __dst, + annotated_ptr<_Src,_SrcProperty> __src, + _Shape __shape, _Sync & __sync){ + memcpy_async(&(*__dst), &(*__src), __shape, __sync); +} + +template +__host__ __device__ +void memcpy_async(const _Group & __group, + _Dst * __dst, + annotated_ptr<_Src,_SrcProperty> __src, + _Shape __shape, _Sync & __sync) { + memcpy_async(__group, __dst, &(*__src), __shape, __sync); +} + +template +__host__ __device__ +void memcpy_async(const _Group & __group, + annotated_ptr<_Dst,_DstProperty> __dst, + annotated_ptr<_Src,_SrcProperty> __src, + _Shape __shape, _Sync & __sync) { + memcpy_async(__group, &(*__dst), &(*__src), __shape, __sync); +} + +_LIBCUDACXX_END_NAMESPACE_CUDA + +#endif // _CUDA_ANNOTATED_PTR diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/atomic b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/atomic new file mode 100644 index 000000000000..b19967087b3f --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/atomic @@ -0,0 +1,10 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
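For orientation, this is roughly how the annotated_ptr header above is meant to be used. The kernel below is only an illustrative sketch: the kernel name, element type, and indexing are invented, and an SM80+ device is assumed for the L2 residency hints to have any effect.

#include <cuda/annotated_ptr>

// Ask the L2 to keep `in` resident (persisting) while streaming `out` through it.
__global__ void scale_kernel(const float* in, float* out, int n) {
    cuda::annotated_ptr<const float, cuda::access_property::persisting> p_in(in);
    cuda::annotated_ptr<float, cuda::access_property::streaming> p_out(out);
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) p_out[i] = 2.0f * p_in[i];  // accesses carry the residency hints
}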
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "std/atomic" diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/barrier b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/barrier new file mode 100644 index 000000000000..616a4013887c --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/barrier @@ -0,0 +1,10 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "std/barrier" diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/latch b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/latch new file mode 100644 index 000000000000..7b19dce5e5e4 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/latch @@ -0,0 +1,10 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "std/latch" diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/pipeline b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/pipeline new file mode 100644 index 000000000000..54cdcb1b3598 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/pipeline @@ -0,0 +1,635 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * NVIDIA SOFTWARE LICENSE + * + * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + * + * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. + * + * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. + * + * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. + * + * 2. DISTRIBUTION REQUIREMENTS. 
These are the distribution requirements for you to exercise the distribution grant: + * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. + * + * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: + * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * + * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. + * + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. 
NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. + * + * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. + * + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. + * + * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * + * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + * + * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. + * + * 11. APPLICABLE LAW. 
This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. + * + * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. + * + * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * + * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. + * + * (v. 
August 20, 2021) + */ +#ifndef _CUDA_PIPELINE +#define _CUDA_PIPELINE + +#include +#include +#include + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + + template + class pipeline; + + enum class pipeline_role { + producer, + consumer + }; + + template + struct __pipeline_stage { + barrier<_Scope> __produced; + barrier<_Scope> __consumed; + }; + + template + class pipeline_shared_state { + public: + pipeline_shared_state() = default; + pipeline_shared_state(const pipeline_shared_state &) = delete; + pipeline_shared_state(pipeline_shared_state &&) = delete; + pipeline_shared_state & operator=(pipeline_shared_state &&) = delete; + pipeline_shared_state & operator=(const pipeline_shared_state &) = delete; + + private: + __pipeline_stage<_Scope> __stages[_Stages_count]; + atomic __refcount; + + template + friend class pipeline; + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, size_t __producer_count); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, pipeline_role __role); + }; + + struct __pipeline_asm_helper { + __device__ + static inline uint32_t __lane_id() + { + uint32_t __lane_id; + asm volatile ("mov.u32 %0, %%laneid;" : "=r"(__lane_id)); + return __lane_id; + } + }; + + template + class pipeline { + public: + pipeline(pipeline &&) = default; + pipeline(const pipeline &) = delete; + pipeline & operator=(pipeline &&) = delete; + pipeline & operator=(const pipeline &) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + ~pipeline() + { + if (__active) { + (void)quit(); + } + } + + _LIBCUDACXX_INLINE_VISIBILITY + bool quit() + { + bool __elected; + uint32_t __sub_count; +NV_IF_TARGET(NV_IS_DEVICE, + const uint32_t __match_mask = __match_any_sync(__activemask(), reinterpret_cast(__shared_state_get_refcount())); + const uint32_t __elected_id = __ffs(__match_mask) - 1; + __elected = (__pipeline_asm_helper::__lane_id() == __elected_id); + __sub_count = __popc(__match_mask); +, + __elected = true; + __sub_count = 1; +) + bool __released = false; + if (__elected) { + const uint32_t __old = __shared_state_get_refcount()->fetch_sub(__sub_count); + const bool __last = (__old == __sub_count); + if (__last) { + for (uint8_t __stage = 0; __stage < __stages_count; ++__stage) { + __shared_state_get_stage(__stage)->__produced.~barrier(); + if (__partitioned) { + __shared_state_get_stage(__stage)->__consumed.~barrier(); + } + } + __released = true; + } + } + __active = false; + return __released; + } + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_acquire() + { + if (__partitioned) { + barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__head)->__consumed; + (void)_CUDA_VSTD::__libcpp_thread_poll_with_backoff(__poll_tester(__stage_barrier, __consumed_phase_parity)); + } + } + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_commit() + { + barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__head)->__produced; + __memcpy_async_synchronize(__stage_barrier, true); + (void)__stage_barrier.arrive(); + if (++__head == __stages_count) { + __head = 0; + if (__partitioned) { + __consumed_phase_parity = 
!__consumed_phase_parity;
+      }
+    }
+  }
+
+  _LIBCUDACXX_INLINE_VISIBILITY
+  void consumer_wait()
+  {
+    barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__tail)->__produced;
+    (void)_CUDA_VSTD::__libcpp_thread_poll_with_backoff(__poll_tester(__stage_barrier, __produced_phase_parity));
+  }
+
+  _LIBCUDACXX_INLINE_VISIBILITY
+  void consumer_release()
+  {
+    if (__partitioned) {
+      (void)__shared_state_get_stage(__tail)->__consumed.arrive();
+    }
+    if (++__tail == __stages_count) {
+      __tail = 0;
+      __produced_phase_parity = !__produced_phase_parity;
+    }
+  }
+
+  template<class _Rep, class _Period>
+  _LIBCUDACXX_INLINE_VISIBILITY
+  bool consumer_wait_for(const _CUDA_VSTD::chrono::duration<_Rep, _Period> & __duration)
+  {
+    barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__tail)->__produced;
+    return _CUDA_VSTD::__libcpp_thread_poll_with_backoff(
+      __poll_tester(__stage_barrier, __produced_phase_parity),
+      _CUDA_VSTD::chrono::duration_cast<_CUDA_VSTD::chrono::nanoseconds>(__duration)
+    );
+  }
+
+  template<class _Clock, class _Duration>
+  _LIBCUDACXX_INLINE_VISIBILITY
+  bool consumer_wait_until(const _CUDA_VSTD::chrono::time_point<_Clock, _Duration> & __time_point)
+  {
+    return consumer_wait_for(__time_point - _Clock::now());
+  }
+
+  private:
+  uint8_t __head : 8;
+  uint8_t __tail : 8;
+  const uint8_t __stages_count : 8;
+  bool __consumed_phase_parity : 1;
+  bool __produced_phase_parity : 1;
+  bool __active : 1;
+  const bool __partitioned : 1;
+  char * const __shared_state;
+
+  _LIBCUDACXX_INLINE_VISIBILITY
+  pipeline(char * __shared_state, uint8_t __stages_count, bool __partitioned)
+    : __head(0)
+    , __tail(0)
+    , __stages_count(__stages_count)
+    , __consumed_phase_parity(true)
+    , __produced_phase_parity(false)
+    , __active(true)
+    , __partitioned(__partitioned)
+    , __shared_state(__shared_state)
+  {}
+
+  _LIBCUDACXX_INLINE_VISIBILITY
+  static bool __barrier_try_wait_parity_impl(barrier<_Scope> & __barrier, bool __phase_parity)
+  {
+    typename barrier<_Scope>::arrival_token __synthesized_token = (__phase_parity ? 1ull : 0ull) << 63;
+    return __barrier.__try_wait(_CUDA_VSTD::move(__synthesized_token));
+  }
+
+  _LIBCUDACXX_INLINE_VISIBILITY
+  static bool __barrier_try_wait_parity(barrier<_Scope> & __barrier, bool __phase_parity)
+  {
+    return __barrier_try_wait_parity_impl(__barrier, __phase_parity);
+  }
+
+  struct __poll_tester {
+    barrier<_Scope> & __barrier;
+    bool __phase_parity;
+
+    _LIBCUDACXX_INLINE_VISIBILITY
+    __poll_tester(barrier<_Scope> & __barrier, bool __phase_parity)
+      : __barrier(__barrier)
+      , __phase_parity(__phase_parity)
+    {}
+
+    _LIBCUDACXX_INLINE_VISIBILITY
+    bool operator()() const
+    {
+      return __barrier_try_wait_parity(__barrier, __phase_parity);
+    }
+  };
+
+  _LIBCUDACXX_INLINE_VISIBILITY
+  __pipeline_stage<_Scope> * __shared_state_get_stage(uint8_t __stage)
+  {
+    ptrdiff_t __stage_offset = __stage * sizeof(__pipeline_stage<_Scope>);
+    return reinterpret_cast<__pipeline_stage<_Scope>*>(__shared_state + __stage_offset);
+  }
+
+  _LIBCUDACXX_INLINE_VISIBILITY
+  atomic<uint32_t, _Scope> * __shared_state_get_refcount()
+  {
+    ptrdiff_t __refcount_offset = __stages_count * sizeof(__pipeline_stage<_Scope>);
+    return reinterpret_cast<atomic<uint32_t, _Scope>*>(__shared_state + __refcount_offset);
+  }
+
+  template<class _Group, thread_scope _Pipeline_scope, uint8_t _Pipeline_stages_count>
+  friend _LIBCUDACXX_INLINE_VISIBILITY
+  pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state);
+
+  template<class _Group, thread_scope _Pipeline_scope, uint8_t _Pipeline_stages_count>
+  friend _LIBCUDACXX_INLINE_VISIBILITY
+  pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, size_t __producer_count);
+
+  template<class _Group, thread_scope _Pipeline_scope, uint8_t _Pipeline_stages_count>
+  friend _LIBCUDACXX_INLINE_VISIBILITY
+  pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, pipeline_role __role);
+  };
+
+  template<>
+  _LIBCUDACXX_INLINE_VISIBILITY
+  inline bool pipeline<thread_scope_block>::__barrier_try_wait_parity(barrier<thread_scope_block> & __barrier, bool __phase_parity)
+  {
+NV_IF_TARGET(NV_PROVIDES_SM_80,
+    if (__isShared(&__barrier)) {
+      uint64_t * __mbarrier = device::barrier_native_handle(__barrier);
+      uint16_t __wait_complete;
+
+      asm volatile ("{"
+                    "  .reg .pred %p;"
+                    "  mbarrier.test_wait.parity.shared.b64 %p, [%1], %2;"
+                    "  selp.u16 %0, 1, 0, %p;"
+                    "}"
+                    : "=h"(__wait_complete)
+                    : "r"(static_cast<uint32_t>(__cvta_generic_to_shared(__mbarrier))), "r"(static_cast<uint32_t>(__phase_parity))
+                    : "memory");
+
+      return bool(__wait_complete);
+    } else
+    {
+      return __barrier_try_wait_parity_impl(__barrier, __phase_parity);
+    }
+,
+    return __barrier_try_wait_parity_impl(__barrier, __phase_parity);
+)
+  }
+
+  template<class _Group, thread_scope _Scope, uint8_t _Stages_count>
+  _LIBCUDACXX_INLINE_VISIBILITY
+  pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state)
+  {
+    const uint32_t __group_size = static_cast<uint32_t>(__group.size());
+    const uint32_t __thread_rank = static_cast<uint32_t>(__group.thread_rank());
+
+    if (__thread_rank == 0) {
+      for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) {
+        init(&__shared_state->__stages[__stage].__produced, __group_size);
+      }
+      __shared_state->__refcount.store(__group_size, std::memory_order_relaxed);
+    }
+    __group.sync();
+
+    return pipeline<_Scope>(reinterpret_cast<char*>(__shared_state->__stages), _Stages_count, false);
+  }
+
+  template<class _Group, thread_scope _Scope, uint8_t _Stages_count>
+  _LIBCUDACXX_INLINE_VISIBILITY
+  pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state, size_t __producer_count)
+  {
+    const uint32_t __group_size = static_cast<uint32_t>(__group.size());
+    const uint32_t __thread_rank = 
static_cast(__group.thread_rank()); + + if (__thread_rank == 0) { + const size_t __consumer_count = __group_size - __producer_count; + for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) { + init(&__shared_state->__stages[__stage].__consumed, __consumer_count); + init(&__shared_state->__stages[__stage].__produced, __producer_count); + } + __shared_state->__refcount.store(__group_size, std::memory_order_relaxed); + } + __group.sync(); + + return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, true); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state, pipeline_role __role) + { + const uint32_t __group_size = static_cast(__group.size()); + const uint32_t __thread_rank = static_cast(__group.thread_rank()); + + if (__thread_rank == 0) { + __shared_state->__refcount.store(0, std::memory_order_relaxed); + } + __group.sync(); + + if (__role == pipeline_role::producer) { + bool __elected; + uint32_t __add_count; +NV_IF_TARGET(NV_IS_DEVICE, + const uint32_t __match_mask = __match_any_sync(__activemask(), reinterpret_cast(&__shared_state->__refcount)); + const uint32_t __elected_id = __ffs(__match_mask) - 1; + __elected = (__pipeline_asm_helper::__lane_id() == __elected_id); + __add_count = __popc(__match_mask); +, + __elected = true; + __add_count = 1; +) + if (__elected) { + (void)__shared_state->__refcount.fetch_add(__add_count, std::memory_order_relaxed); + } + } + __group.sync(); + + if (__thread_rank == 0) { + const uint32_t __producer_count = __shared_state->__refcount.load(std::memory_order_relaxed); + const uint32_t __consumer_count = __group_size - __producer_count; + for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) { + init(&__shared_state->__stages[__stage].__consumed, __consumer_count); + init(&__shared_state->__stages[__stage].__produced, __producer_count); + } + __shared_state->__refcount.store(__group_size, std::memory_order_relaxed); + } + __group.sync(); + + return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, true); + } + +_LIBCUDACXX_END_NAMESPACE_CUDA + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE + + template + __device__ + void __pipeline_consumer_wait(pipeline & __pipeline); + + __device__ + inline void __pipeline_consumer_wait(pipeline & __pipeline, uint8_t __prior); + +_LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + + template<> + class pipeline { + public: + pipeline(pipeline &&) = default; + pipeline(const pipeline &) = delete; + pipeline & operator=(pipeline &&) = delete; + pipeline & operator=(const pipeline &) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + ~pipeline() {} + + _LIBCUDACXX_INLINE_VISIBILITY + bool quit() + { + return true; + } + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_acquire() {} + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_commit() + { +NV_IF_TARGET(NV_PROVIDES_SM_80, + asm volatile ("cp.async.commit_group;"); + ++__head; +) + } + + _LIBCUDACXX_INLINE_VISIBILITY + void consumer_wait() + { +NV_IF_TARGET(NV_PROVIDES_SM_80, + if (__head == __tail) { + return; + } + + const uint8_t __prior = __head - __tail - 1; + device::__pipeline_consumer_wait(*this, __prior); + ++__tail; +) + } + + _LIBCUDACXX_INLINE_VISIBILITY + void consumer_release() {} + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool consumer_wait_for(const _CUDA_VSTD::chrono::duration<_Rep, _Period> & __duration) + { + (void)__duration; + consumer_wait(); + return 
true; + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool consumer_wait_until(const _CUDA_VSTD::chrono::time_point<_Clock, _Duration> & __time_point) + { + (void)__time_point; + consumer_wait(); + return true; + } + + private: + uint8_t __head; + uint8_t __tail; + + _LIBCUDACXX_INLINE_VISIBILITY + pipeline() + : __head(0) + , __tail(0) + {} + + friend _LIBCUDACXX_INLINE_VISIBILITY inline pipeline make_pipeline(); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + void pipeline_consumer_wait_prior(pipeline & __pipeline); + }; + +_LIBCUDACXX_END_NAMESPACE_CUDA + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE + + template + __device__ + void __pipeline_consumer_wait(pipeline & __pipeline) + { + (void)__pipeline; +NV_IF_TARGET(NV_PROVIDES_SM_80, + constexpr uint8_t __max_prior = 8; + + asm volatile ("cp.async.wait_group %0;" + : + : "n"(_Prior < __max_prior ? _Prior : __max_prior)); +) + } + + __device__ + inline void __pipeline_consumer_wait(pipeline & __pipeline, uint8_t __prior) + { + switch (__prior) { + case 0: device::__pipeline_consumer_wait<0>(__pipeline); break; + case 1: device::__pipeline_consumer_wait<1>(__pipeline); break; + case 2: device::__pipeline_consumer_wait<2>(__pipeline); break; + case 3: device::__pipeline_consumer_wait<3>(__pipeline); break; + case 4: device::__pipeline_consumer_wait<4>(__pipeline); break; + case 5: device::__pipeline_consumer_wait<5>(__pipeline); break; + case 6: device::__pipeline_consumer_wait<6>(__pipeline); break; + case 7: device::__pipeline_consumer_wait<7>(__pipeline); break; + default: device::__pipeline_consumer_wait<8>(__pipeline); break; + } + } + +_LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + + _LIBCUDACXX_INLINE_VISIBILITY + inline pipeline make_pipeline() + { + return pipeline(); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void pipeline_consumer_wait_prior(pipeline & __pipeline) + { +NV_IF_TARGET(NV_PROVIDES_SM_80, + device::__pipeline_consumer_wait<_Prior>(__pipeline); + __pipeline.__tail = __pipeline.__head - _Prior; +) + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void pipeline_producer_commit(pipeline & __pipeline, barrier<_Scope> & __barrier) + { + (void)__pipeline; +NV_IF_TARGET(NV_PROVIDES_SM_80, + __memcpy_async_synchronize(__barrier, true); +) + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void __memcpy_async_synchronize(pipeline<_Scope> & __pipeline, bool __is_async) { + // memcpy_async submissions never synchronize on their own in the pipeline path. + (void)__pipeline; + (void)__is_async; + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void memcpy_async(_Group const & __group, _Type * __destination, _Type const * __source, std::size_t __size, pipeline<_Scope> & __pipeline) + { + // When compiling with NVCC and GCC 4.8, certain user defined types that _are_ trivially copyable are + // incorrectly classified as not trivially copyable. Remove this assertion to allow for their usage with + // memcpy_async when compiling with GCC 4.8. + // FIXME: remove the #if once GCC 4.8 is no longer supported. + #if !defined(_LIBCUDACXX_COMPILER_GCC) || _GNUC_VER > 408 + static_assert(_CUDA_VSTD::is_trivially_copyable<_Type>::value, "memcpy_async requires a trivially copyable type"); + #endif + + __memcpy_async(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); + } + + template _Alignment) ? 
alignof(_Type) : _Alignment> + _LIBCUDACXX_INLINE_VISIBILITY + void memcpy_async(_Group const & __group, _Type * __destination, _Type const * __source, aligned_size_t<_Alignment> __size, pipeline<_Scope> & __pipeline) { + // When compiling with NVCC and GCC 4.8, certain user defined types that _are_ trivially copyable are + // incorrectly classified as not trivially copyable. Remove this assertion to allow for their usage with + // memcpy_async when compiling with GCC 4.8. + // FIXME: remove the #if once GCC 4.8 is no longer supported. + #if !defined(_LIBCUDACXX_COMPILER_GCC) || _GNUC_VER > 408 + static_assert(_CUDA_VSTD::is_trivially_copyable<_Type>::value, "memcpy_async requires a trivially copyable type"); + #endif + + __memcpy_async<_Larger_alignment>(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void memcpy_async(_Type * __destination, _Type const * __source, _Size __size, pipeline<_Scope> & __pipeline) { + memcpy_async(__single_thread_group{}, __destination, __source, __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void memcpy_async(_Group const & __group, void * __destination, void const * __source, std::size_t __size, pipeline<_Scope> & __pipeline) { + __memcpy_async<1>(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void memcpy_async(_Group const & __group, void * __destination, void const * __source, aligned_size_t<_Alignment> __size, pipeline<_Scope> & __pipeline) { + __memcpy_async<_Alignment>(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void memcpy_async(void * __destination, void const * __source, _Size __size, pipeline<_Scope> & __pipeline) { + memcpy_async(__single_thread_group{}, __destination, __source, __size, __pipeline); + } + +_LIBCUDACXX_END_NAMESPACE_CUDA + +#endif //_CUDA_PIPELINE diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/semaphore b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/semaphore new file mode 100644 index 000000000000..eb76c73537f1 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/semaphore @@ -0,0 +1,10 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "std/semaphore" diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/array b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/array new file mode 100644 index 000000000000..64240539bb47 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/array @@ -0,0 +1,34 @@ +//===----------------------------------------------------------------------===// +// +// Part of the CUDA Toolkit, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
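As a sketch of how the pipeline header above is driven, here is a hypothetical SM80+ kernel in which each thread stages its own element through shared memory; the kernel name and the block size of 256 are invented for illustration, and every index is assumed in range.

#include <cuda/pipeline>

__global__ void staged_add_one(int* out, const int* in) {
    __shared__ int stage[256];
    auto pipe = cuda::make_pipeline();  // thread-scope pipeline, no shared state
    pipe.producer_acquire();
    // Asynchronously copy this thread's 4-byte element from global to shared.
    cuda::memcpy_async(&stage[threadIdx.x],
                       &in[blockIdx.x * 256 + threadIdx.x],
                       sizeof(int), pipe);
    pipe.producer_commit();
    pipe.consumer_wait();   // block until this thread's copy has landed
    out[blockIdx.x * 256 + threadIdx.x] = stage[threadIdx.x] + 1;
    pipe.consumer_release();
}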
diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/array b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/array
new file mode 100644
index 000000000000..64240539bb47
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/array
@@ -0,0 +1,34 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the CUDA Toolkit, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _CUDA_ARRAY
+#define _CUDA_ARRAY
+
+#include "cassert"
+#include "cstdint"
+#include "limits"
+#include "type_traits"
+#include "iterator"
+#include "utility"
+#include "initializer_list"
+
+#include "detail/__config"
+
+#include "detail/__pragma_push"
+
+#include "detail/libcxx/include/algorithm"
+#include "detail/libcxx/include/__tuple"
+#include "detail/libcxx/include/cstdlib"
+#include "detail/libcxx/include/stdexcept"
+#include "detail/libcxx/include/array"
+
+#include "detail/__pragma_pop"
+
+#endif //_CUDA_ARRAY
+
+
diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/atomic b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/atomic
new file mode 100644
index 000000000000..65315f30d515
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/atomic
@@ -0,0 +1,296 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of libcu++, the C++ Standard Library for your entire system,
+// under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _CUDA_ATOMIC
+#define _CUDA_ATOMIC
+
+#ifndef __CUDACC_RTC__
+    #include <atomic>
+    static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "");
+    static_assert(ATOMIC_CHAR_LOCK_FREE == 2, "");
+    static_assert(ATOMIC_CHAR16_T_LOCK_FREE == 2, "");
+    static_assert(ATOMIC_CHAR32_T_LOCK_FREE == 2, "");
+    static_assert(ATOMIC_WCHAR_T_LOCK_FREE == 2, "");
+    static_assert(ATOMIC_SHORT_LOCK_FREE == 2, "");
+    static_assert(ATOMIC_INT_LOCK_FREE == 2, "");
+    static_assert(ATOMIC_LONG_LOCK_FREE == 2, "");
+    static_assert(ATOMIC_LLONG_LOCK_FREE == 2, "");
+    static_assert(ATOMIC_POINTER_LOCK_FREE == 2, "");
+    #undef ATOMIC_BOOL_LOCK_FREE
+    #undef ATOMIC_BOOL_LOCK_FREE
+    #undef ATOMIC_CHAR_LOCK_FREE
+    #undef ATOMIC_CHAR16_T_LOCK_FREE
+    #undef ATOMIC_CHAR32_T_LOCK_FREE
+    #undef ATOMIC_WCHAR_T_LOCK_FREE
+    #undef ATOMIC_SHORT_LOCK_FREE
+    #undef ATOMIC_INT_LOCK_FREE
+    #undef ATOMIC_LONG_LOCK_FREE
+    #undef ATOMIC_LLONG_LOCK_FREE
+    #undef ATOMIC_POINTER_LOCK_FREE
+    #undef ATOMIC_FLAG_INIT
+    #undef ATOMIC_VAR_INIT
+#endif //__CUDACC_RTC__
+
+// pre-define lock free query for heterogeneous compatibility
+#ifndef _LIBCUDACXX_ATOMIC_IS_LOCK_FREE
+#define _LIBCUDACXX_ATOMIC_IS_LOCK_FREE(__x) (__x <= 8)
+#endif
+
+#include "cassert"
+#include "cstddef"
+#include "cstdint"
+#include "type_traits"
+#include "version"
+
+#include "detail/__config"
+
+#include "detail/__pragma_push"
+
+#include "detail/__threading_support"
+
+#include "detail/libcxx/include/atomic"
+
+_LIBCUDACXX_BEGIN_NAMESPACE_CUDA
+
+using std::__detail::thread_scope;
+using std::__detail::thread_scope_system;
+using std::__detail::thread_scope_device;
+using std::__detail::thread_scope_block;
+using std::__detail::thread_scope_thread;
+
+namespace __detail {
+using std::__detail::__thread_scope_block_tag;
+using std::__detail::__thread_scope_device_tag;
+using std::__detail::__thread_scope_system_tag;
+}
+
+using memory_order = std::memory_order;
+
+constexpr memory_order memory_order_relaxed = std::memory_order_relaxed;
+constexpr memory_order memory_order_consume = std::memory_order_consume;
+constexpr memory_order memory_order_acquire = std::memory_order_acquire;
+constexpr memory_order memory_order_release = std::memory_order_release;
+constexpr memory_order memory_order_acq_rel = std::memory_order_acq_rel;
+constexpr memory_order memory_order_seq_cst = std::memory_order_seq_cst;
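
A minimal sketch of the scoped atomics this header defines just below, including the fetch_max extension that goes beyond the std API. The kernel name and the block-maximum shape are illustrative only:

    #include <cuda/atomic>
    #include <climits>

    __global__ void block_max(int const* vals, int* out) {
        __shared__ cuda::atomic<int, cuda::thread_scope_block> bmax;   // block-scoped atomic
        if (threadIdx.x == 0) bmax.store(INT_MIN, cuda::memory_order_relaxed);
        __syncthreads();

        // fetch_max is the cuda:: extension defined below, not part of std::atomic.
        bmax.fetch_max(vals[blockIdx.x * blockDim.x + threadIdx.x],
                       cuda::memory_order_relaxed);
        __syncthreads();

        if (threadIdx.x == 0)
            out[blockIdx.x] = bmax.load(cuda::memory_order_relaxed);
    }
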
+
+// atomic<T>
+
+template <class _Tp, thread_scope _Sco = thread_scope::thread_scope_system>
+struct atomic
+    : public std::__atomic_base<_Tp, _Sco>
+{
+    typedef std::__atomic_base<_Tp, _Sco> __base;
+
+    constexpr atomic() noexcept = default;
+    __host__ __device__
+    constexpr atomic(_Tp __d) noexcept : __base(__d) {}
+
+    __host__ __device__
+    _Tp operator=(_Tp __d) volatile noexcept
+        {__base::store(__d); return __d;}
+    __host__ __device__
+    _Tp operator=(_Tp __d) noexcept
+        {__base::store(__d); return __d;}
+
+    __host__ __device__
+    _Tp fetch_max(const _Tp & __op, memory_order __m = memory_order_seq_cst) volatile noexcept
+    {
+        return std::__detail::__cxx_atomic_fetch_max(&this->__a_, __op, __m);
+    }
+
+    __host__ __device__
+    _Tp fetch_min(const _Tp & __op, memory_order __m = memory_order_seq_cst) volatile noexcept
+    {
+        return std::__detail::__cxx_atomic_fetch_min(&this->__a_, __op, __m);
+    }
+};
+
+// atomic<T*>
+
+template <class _Tp, thread_scope _Sco>
+struct atomic<_Tp*, _Sco>
+    : public std::__atomic_base<_Tp*, _Sco>
+{
+    typedef std::__atomic_base<_Tp*, _Sco> __base;
+
+    constexpr atomic() noexcept = default;
+    __host__ __device__
+    constexpr atomic(_Tp* __d) noexcept : __base(__d) {}
+
+    __host__ __device__
+    _Tp* operator=(_Tp* __d) volatile noexcept
+        {__base::store(__d); return __d;}
+    __host__ __device__
+    _Tp* operator=(_Tp* __d) noexcept
+        {__base::store(__d); return __d;}
+
+    __host__ __device__
+    _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst)
+        volatile noexcept
+        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
+    __host__ __device__
+    _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) noexcept
+        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
+    __host__ __device__
+    _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst)
+        volatile noexcept
+        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
+    __host__ __device__
+    _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) noexcept
+        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
+
+    __host__ __device__
+    _Tp* operator++(int) volatile noexcept {return fetch_add(1);}
+    __host__ __device__
+    _Tp* operator++(int) noexcept {return fetch_add(1);}
+    __host__ __device__
+    _Tp* operator--(int) volatile noexcept {return fetch_sub(1);}
+    __host__ __device__
+    _Tp* operator--(int) noexcept {return fetch_sub(1);}
+    __host__ __device__
+    _Tp* operator++() volatile noexcept {return fetch_add(1) + 1;}
+    __host__ __device__
+    _Tp* operator++() noexcept {return fetch_add(1) + 1;}
+    __host__ __device__
+    _Tp* operator--() volatile noexcept {return fetch_sub(1) - 1;}
+    __host__ __device__
+    _Tp* operator--() noexcept {return fetch_sub(1) - 1;}
+    __host__ __device__
+    _Tp* operator+=(ptrdiff_t __op) volatile noexcept {return fetch_add(__op) + __op;}
+    __host__ __device__
+    _Tp* operator+=(ptrdiff_t __op) noexcept {return fetch_add(__op) + __op;}
+    __host__ __device__
+    _Tp* operator-=(ptrdiff_t __op) volatile noexcept {return fetch_sub(__op) - __op;}
+    __host__ __device__
+    _Tp* operator-=(ptrdiff_t __op) noexcept {return fetch_sub(__op) - __op;}
+};
+
+// atomic_ref<T>
+
+template <class _Tp, thread_scope _Sco = thread_scope::thread_scope_system>
+struct atomic_ref
+    : public std::__atomic_base_ref<_Tp, _Sco>
+{
+    typedef std::__atomic_base_ref<_Tp, _Sco> __base;
+
+    __host__ __device__
+    constexpr atomic_ref(_Tp& __d) noexcept :
__base(__d) {} + + __host__ __device__ + _Tp operator=(_Tp __d) const volatile noexcept + {__base::store(__d); return __d;} + __host__ __device__ + _Tp operator=(_Tp __d) const noexcept + {__base::store(__d); return __d;} + + __host__ __device__ + _Tp fetch_max(const _Tp & __op, memory_order __m = memory_order_seq_cst) const volatile noexcept + { + return std::__detail::__cxx_atomic_fetch_max(&this->__a_, __op, __m); + } + + __host__ __device__ + _Tp fetch_min(const _Tp & __op, memory_order __m = memory_order_seq_cst) const volatile noexcept + { + return std::__detail::__cxx_atomic_fetch_min(&this->__a_, __op, __m); + } +}; + +// atomic_ref + +template +struct atomic_ref<_Tp*, _Sco> + : public std::__atomic_base_ref<_Tp*, _Sco> +{ + typedef std::__atomic_base_ref<_Tp*, _Sco> __base; + + __host__ __device__ + constexpr atomic_ref(_Tp*& __d) noexcept : __base(__d) {} + + __host__ __device__ + _Tp* operator=(_Tp* __d) const volatile noexcept + {__base::store(__d); return __d;} + __host__ __device__ + _Tp* operator=(_Tp* __d) const noexcept + {__base::store(__d); return __d;} + + __host__ __device__ + _Tp* fetch_add(ptrdiff_t __op, + memory_order __m = memory_order_seq_cst) const volatile noexcept + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + __host__ __device__ + _Tp* fetch_add(ptrdiff_t __op, + memory_order __m = memory_order_seq_cst) const noexcept + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + __host__ __device__ + _Tp* fetch_sub(ptrdiff_t __op, + memory_order __m = memory_order_seq_cst) const volatile noexcept + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} + __host__ __device__ + _Tp* fetch_sub(ptrdiff_t __op, + memory_order __m = memory_order_seq_cst) const noexcept + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} + + __host__ __device__ + _Tp* operator++(int) const volatile noexcept {return fetch_add(1);} + __host__ __device__ + _Tp* operator++(int) const noexcept {return fetch_add(1);} + __host__ __device__ + _Tp* operator--(int) const volatile noexcept {return fetch_sub(1);} + __host__ __device__ + _Tp* operator--(int) const noexcept {return fetch_sub(1);} + __host__ __device__ + _Tp* operator++() const volatile noexcept {return fetch_add(1) + 1;} + __host__ __device__ + _Tp* operator++() const noexcept {return fetch_add(1) + 1;} + __host__ __device__ + _Tp* operator--() const volatile noexcept {return fetch_sub(1) - 1;} + __host__ __device__ + _Tp* operator--() const noexcept {return fetch_sub(1) - 1;} + __host__ __device__ + _Tp* operator+=(ptrdiff_t __op) const volatile noexcept {return fetch_add(__op) + __op;} + __host__ __device__ + _Tp* operator+=(ptrdiff_t __op) const noexcept {return fetch_add(__op) + __op;} + __host__ __device__ + _Tp* operator-=(ptrdiff_t __op) const volatile noexcept {return fetch_sub(__op) - __op;} + __host__ __device__ + _Tp* operator-=(ptrdiff_t __op) const noexcept {return fetch_sub(__op) - __op;} +}; + +inline __host__ __device__ void atomic_thread_fence(memory_order __m, thread_scope _Scope = thread_scope::thread_scope_system) { + NV_DISPATCH_TARGET( + NV_IS_DEVICE, ( + switch(_Scope) { + case thread_scope::thread_scope_system: + std::__detail::__atomic_thread_fence_cuda((int)__m, __detail::__thread_scope_system_tag()); + break; + case thread_scope::thread_scope_device: + std::__detail::__atomic_thread_fence_cuda((int)__m, __detail::__thread_scope_device_tag()); + break; + case thread_scope::thread_scope_block: + std::__detail::__atomic_thread_fence_cuda((int)__m, 
__detail::__thread_scope_block_tag()); + break; + } + ), + NV_IS_HOST, ( + (void) _Scope; + std::atomic_thread_fence(__m); + ) + ) +} + +inline __host__ __device__ void atomic_signal_fence(memory_order __m) { + std::atomic_signal_fence(__m); +} + +_LIBCUDACXX_END_NAMESPACE_CUDA + +#include "detail/__pragma_pop" + +#endif //_CUDA_ATOMIC diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/barrier b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/barrier new file mode 100644 index 000000000000..69a8ecbbfbdb --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/barrier @@ -0,0 +1,468 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700 +# error "CUDA synchronization primitives are only supported for sm_70 and up." +#endif + +#ifndef _CUDA_BARRIER +#define _CUDA_BARRIER + +#include "atomic" +#include "cstddef" + +#include "detail/__config" + +#include "detail/__pragma_push" + +#include "detail/libcxx/include/barrier" + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + +template +struct aligned_size_t { + static constexpr std::size_t align = _Alignment; + std::size_t value; + __host__ __device__ + explicit aligned_size_t(size_t __s) : value(__s) { } + __host__ __device__ + operator size_t() const { return value; } +}; + +template +class barrier : public std::__barrier_base<_CompletionF, _Sco> { + template + friend class pipeline; + + using std::__barrier_base<_CompletionF, _Sco>::__try_wait; + +public: + barrier() = default; + + barrier(const barrier &) = delete; + barrier & operator=(const barrier &) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR + barrier(std::ptrdiff_t __expected, _CompletionF __completion = _CompletionF()) + : std::__barrier_base<_CompletionF, _Sco>(__expected, __completion) { + } + + _LIBCUDACXX_INLINE_VISIBILITY + friend void init(barrier * __b, std::ptrdiff_t __expected) { + new (__b) barrier(__expected); + } + + _LIBCUDACXX_INLINE_VISIBILITY + friend void init(barrier * __b, std::ptrdiff_t __expected, _CompletionF __completion) { + new (__b) barrier(__expected, __completion); + } +}; + +struct __block_scope_barrier_base {}; + +_LIBCUDACXX_END_NAMESPACE_CUDA + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE + +__device__ +inline std::uint64_t * barrier_native_handle(barrier & b); + +_LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + +template<> +class barrier : public __block_scope_barrier_base { + using __barrier_base = std::__barrier_base; + __barrier_base __barrier; + + __device__ + friend inline std::uint64_t * device::_LIBCUDACXX_CUDA_ABI_NAMESPACE::barrier_native_handle(barrier & b); + +public: + using arrival_token = typename __barrier_base::arrival_token; + +private: + struct __poll_tester { + barrier const* __this; + arrival_token __phase; + + _LIBCUDACXX_INLINE_VISIBILITY + __poll_tester(barrier const* __this_, arrival_token&& __phase_) + : __this(__this_) + , __phase(_CUDA_VSTD::move(__phase_)) + {} + + inline _LIBCUDACXX_INLINE_VISIBILITY + bool operator()() const + { + return __this->__try_wait(__phase); + } 
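
(The block-scoped barrier specialization being assembled here is the usual entry point for device code. A minimal usage sketch, assuming sm_70+ and an illustrative kernel name:

    #include <cuda/barrier>

    __global__ void phase_sync(float* data) {
        __shared__ cuda::barrier<cuda::thread_scope_block> bar;
        if (threadIdx.x == 0) init(&bar, blockDim.x);   // one-time init by a single thread
        __syncthreads();

        data[threadIdx.x] += 1.0f;                      // phase 1 work
        bar.arrive_and_wait();                          // phase boundary, like __syncthreads()
    }
)
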
+ }; + + _LIBCUDACXX_INLINE_VISIBILITY + bool __try_wait(arrival_token __phase) const { +#if __CUDA_ARCH__ >= 800 + if (__isShared(&__barrier)) { + int __ready = 0; + asm volatile ("{\n\t" + ".reg .pred p;\n\t" + "mbarrier.test_wait.shared.b64 p, [%1], %2;\n\t" + "selp.b32 %0, 1, 0, p;\n\t" + "}" + : "=r"(__ready) + : "r"(static_cast(__cvta_generic_to_shared(&__barrier))), "l"(__phase) + : "memory"); + return __ready; + } + else +#endif + { + return __barrier.__try_wait(std::move(__phase)); + } + } + + template + friend class pipeline; + +public: + barrier() = default; + + barrier(const barrier &) = delete; + barrier & operator=(const barrier &) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + barrier(std::ptrdiff_t __expected, std::__empty_completion __completion = std::__empty_completion()) { + static_assert(_LIBCUDACXX_OFFSET_IS_ZERO(barrier, __barrier), "fatal error: bad barrier layout"); + init(this, __expected, __completion); + } + + _LIBCUDACXX_INLINE_VISIBILITY + ~barrier() { +#if __CUDA_ARCH__ >= 800 + if (__isShared(&__barrier)) { + asm volatile ("mbarrier.inval.shared.b64 [%0];" + :: "r"(static_cast(__cvta_generic_to_shared(&__barrier))) + : "memory"); + } +#endif + } + + _LIBCUDACXX_INLINE_VISIBILITY + friend void init(barrier * __b, std::ptrdiff_t __expected, std::__empty_completion __completion = std::__empty_completion()) { +#if __CUDA_ARCH__ >= 800 + if (__isShared(&__b->__barrier)) { + asm volatile ("mbarrier.init.shared.b64 [%0], %1;" + :: "r"(static_cast(__cvta_generic_to_shared(&__b->__barrier))), + "r"(static_cast(__expected)) + : "memory"); + } + else +#endif + { + new (&__b->__barrier) __barrier_base(__expected); + } + } + + _LIBCUDACXX_NODISCARD_ATTRIBUTE _LIBCUDACXX_INLINE_VISIBILITY + arrival_token arrive(std::ptrdiff_t __update = 1) + { +#if __CUDA_ARCH__ + if (__isShared(&__barrier)) { + arrival_token __token; +#if __CUDA_ARCH__ >= 800 + if (__update > 1) { + asm volatile ("mbarrier.arrive.noComplete.shared.b64 %0, [%1], %2;" + : "=l"(__token) + : "r"(static_cast(__cvta_generic_to_shared(&__barrier))), + "r"(static_cast(__update - 1)) + : "memory"); + } + asm volatile ("mbarrier.arrive.shared.b64 %0, [%1];" + : "=l"(__token) + : "r"(static_cast(__cvta_generic_to_shared(&__barrier))) + : "memory"); +#else + unsigned int __activeA = __match_any_sync(__activemask(), __update); + unsigned int __activeB = __match_any_sync(__activemask(), reinterpret_cast(&__barrier)); + unsigned int __active = __activeA & __activeB; + int __inc = __popc(__active) * __update; + + unsigned __laneid; + asm volatile ("mov.u32 %0, %laneid;" : "=r"(__laneid)); + int __leader = __ffs(__active) - 1; + + if(__leader == __laneid) + { + __token = __barrier.arrive(__inc); + } + __token = __shfl_sync(__active, __token, __leader); +#endif + return __token; + } + else +#endif + { + return __barrier.arrive(__update); + } + } + + _LIBCUDACXX_INLINE_VISIBILITY + void wait(arrival_token && __phase) const + { + _CUDA_VSTD::__libcpp_thread_poll_with_backoff(__poll_tester(this, _CUDA_VSTD::move(__phase))); + } + + inline _LIBCUDACXX_INLINE_VISIBILITY + void arrive_and_wait() + { + wait(arrive()); + } + + _LIBCUDACXX_INLINE_VISIBILITY + void arrive_and_drop() + { +#if __CUDA_ARCH__ >= 800 + if (__isShared(&__barrier)) { + asm volatile ("mbarrier.arrive_drop.shared.b64 _, [%0];" + :: "r"(static_cast(__cvta_generic_to_shared(&__barrier))) + : "memory"); + } + else +#endif + { + __barrier.arrive_and_drop(); + } + } + + _LIBCUDACXX_INLINE_VISIBILITY + static constexpr ptrdiff_t max() noexcept + { + return (1 << 
20) - 1; + } +}; + +_LIBCUDACXX_END_NAMESPACE_CUDA + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE + +__device__ +inline std::uint64_t * barrier_native_handle(barrier & b) { + return reinterpret_cast(&b.__barrier); +} + +_LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + +template<> +class barrier : private barrier { + using __base = barrier; + +public: + using __base::__base; + + _LIBCUDACXX_INLINE_VISIBILITY + friend void init(barrier * __b, std::ptrdiff_t __expected, std::__empty_completion __completion = std::__empty_completion()) { + init(static_cast<__base *>(__b), __expected, __completion); + } + + using __base::arrive; + using __base::wait; + using __base::arrive_and_wait; + using __base::arrive_and_drop; + using __base::max; +}; + +template +_LIBCUDACXX_INLINE_VISIBILITY +inline void __strided_memcpy(char * __destination, char const * __source, std::size_t __total_size, std::size_t __rank, std::size_t __stride = 1) { + if (__stride == 1) { + memcpy(__destination, __source, __total_size); + } + else { + for (std::size_t __offset = __rank * _Alignment; __offset < __total_size; __offset += __stride * _Alignment) { + memcpy(__destination + __offset, __source + __offset, _Alignment); + } + } +} + +#if __CUDA_ARCH__ >= 800 +template 16)> +struct __memcpy_async_impl { + __device__ static inline bool __copy(char * __destination, char const * __source, std::size_t __total_size, std::size_t __rank, std::size_t __stride) { + __strided_memcpy<_Alignment>(__destination, __source, __total_size, __rank, __stride); + return false; + } +}; + +template<> +struct __memcpy_async_impl<4, false> { + __device__ static inline bool __copy(char * __destination, char const * __source, std::size_t __total_size, std::size_t __rank, std::size_t __stride) { + for (std::size_t __offset = __rank * 4; __offset < __total_size; __offset += __stride * 4) { + asm volatile ("cp.async.ca.shared.global [%0], [%1], 4, 4;" + :: "r"(static_cast(__cvta_generic_to_shared(__destination + __offset))), + "l"(__source + __offset) + : "memory"); + } + return true; + } +}; + +template<> +struct __memcpy_async_impl<8, false> { + __device__ static inline bool __copy(char * __destination, char const * __source, std::size_t __total_size, std::size_t __rank, std::size_t __stride) { + for (std::size_t __offset = __rank * 8; __offset < __total_size; __offset += __stride * 8) { + asm volatile ("cp.async.ca.shared.global [%0], [%1], 8, 8;" + :: "r"(static_cast(__cvta_generic_to_shared(__destination + __offset))), + "l"(__source + __offset) + : "memory"); + } + return true; + } +}; + +template<> +struct __memcpy_async_impl<16, false> { + __device__ static inline bool __copy(char * __destination, char const * __source, std::size_t __total_size, std::size_t __rank, std::size_t __stride) { + for (std::size_t __offset = __rank * 16; __offset < __total_size; __offset += __stride * 16) { + asm volatile ("cp.async.cg.shared.global [%0], [%1], 16, 16;" + :: "r"(static_cast(__cvta_generic_to_shared(__destination + __offset))), + "l"(__source + __offset) + : "memory"); + } + return true; + } +}; + +template +struct __memcpy_async_impl<_Alignment, true> : public __memcpy_async_impl<16, false> { }; +#endif + +template= thread_scope_block) && std::is_same<_CompF, std::__empty_completion>::value> +_LIBCUDACXX_INLINE_VISIBILITY +inline void __memcpy_async_synchronize(barrier<_Sco, _CompF> & __barrier, bool __is_async) { +#if __CUDA_ARCH__ >= 800 + if (__is_async) { + if (_Is_mbarrier && __isShared(&__barrier)) { + asm volatile 
("cp.async.mbarrier.arrive.shared.b64 [%0];" + :: "r"(static_cast(__cvta_generic_to_shared(&__barrier))) + : "memory"); + } + else { + asm volatile ("cp.async.wait_all;" + ::: "memory"); + } + } +#endif +} + +template +_LIBCUDACXX_INLINE_VISIBILITY +void inline __memcpy_async(_Group const & __group, char * __destination, char const * __source, std::size_t __size, _Sync & __sync) { + bool __is_async = false; + +#if __CUDA_ARCH__ >= 800 + __is_async = __isShared(__destination) && __isGlobal(__source); + + if (__is_async) { + if (_Native_alignment < 4) { + auto __source_address = reinterpret_cast(__source); + auto __destination_address = reinterpret_cast(__destination); + + // Lowest bit set will tell us what the common alignment of the three values is. + auto _Alignment = __ffs(__source_address | __destination_address | __size); + + switch (_Alignment) { + default: __is_async = __memcpy_async_impl<16>::__copy(__destination, __source, __size, __group.thread_rank(), __group.size()); break; + case 4: __is_async = __memcpy_async_impl<8>::__copy(__destination, __source, __size, __group.thread_rank(), __group.size()); break; + case 3: __is_async = __memcpy_async_impl<4>::__copy(__destination, __source, __size, __group.thread_rank(), __group.size()); break; + case 2: // fallthrough + case 1: __is_async = __memcpy_async_impl<1>::__copy(__destination, __source, __size, __group.thread_rank(), __group.size()); break; + } + } + else { + __is_async = __memcpy_async_impl<_Native_alignment>::__copy(__destination, __source, __size, __group.thread_rank(), __group.size()); + } + } + else +#endif + { + __strided_memcpy<_Native_alignment>(__destination, __source, __size, __group.thread_rank(), __group.size()); + } + + __memcpy_async_synchronize(__sync, __is_async); +} + +struct __single_thread_group { + _LIBCUDACXX_INLINE_VISIBILITY + void sync() const {} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr std::size_t size() const { return 1; }; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr std::size_t thread_rank() const { return 0; }; +}; + +template +_LIBCUDACXX_INLINE_VISIBILITY +void memcpy_async(_Group const & __group, _Tp * __destination, _Tp const * __source, std::size_t __size, barrier<_Sco, _CompF> & __barrier) { + // When compiling with NVCC and GCC 4.8, certain user defined types that _are_ trivially copyable are + // incorrectly classified as not trivially copyable. Remove this assertion to allow for their usage with + // memcpy_async when compiling with GCC 4.8. + // FIXME: remove the #if once GCC 4.8 is no longer supported. +#if !defined(_LIBCUDACXX_COMPILER_GCC) || _GNUC_VER > 408 + static_assert(std::is_trivially_copyable<_Tp>::value, "memcpy_async requires a trivially copyable type"); +#endif + + __memcpy_async(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __barrier); +} + +template _Alignment) ? alignof(_Tp) : _Alignment> +_LIBCUDACXX_INLINE_VISIBILITY +void memcpy_async(_Group const & __group, _Tp * __destination, _Tp const * __source, aligned_size_t<_Alignment> __size, barrier<_Sco, _CompF> & __barrier) { + // When compiling with NVCC and GCC 4.8, certain user defined types that _are_ trivially copyable are + // incorrectly classified as not trivially copyable. Remove this assertion to allow for their usage with + // memcpy_async when compiling with GCC 4.8. + // FIXME: remove the #if once GCC 4.8 is no longer supported. 
+#if !defined(_LIBCUDACXX_COMPILER_GCC) || _GNUC_VER > 408 + static_assert(std::is_trivially_copyable<_Tp>::value, "memcpy_async requires a trivially copyable type"); +#endif + + __memcpy_async<_Larger_alignment>(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __barrier); +} + +template +_LIBCUDACXX_INLINE_VISIBILITY +void memcpy_async(_Tp * __destination, _Tp const * __source, _Size __size, barrier<_Sco, _CompF> & __barrier) { + memcpy_async(__single_thread_group{}, __destination, __source, __size, __barrier); +} + +template +_LIBCUDACXX_INLINE_VISIBILITY +void memcpy_async(_Group const & __group, void * __destination, void const * __source, std::size_t __size, barrier<_Sco, _CompF> & __barrier) { + __memcpy_async<1>(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __barrier); +} + +template +_LIBCUDACXX_INLINE_VISIBILITY +void memcpy_async(_Group const & __group, void * __destination, void const * __source, aligned_size_t<_Alignment> __size, barrier<_Sco, _CompF> & __barrier) { + __memcpy_async<_Alignment>(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __barrier); +} + +template +_LIBCUDACXX_INLINE_VISIBILITY +void memcpy_async(void * __destination, void const * __source, _Size __size, barrier<_Sco, _CompF> & __barrier) { + memcpy_async(__single_thread_group{}, __destination, __source, __size, __barrier); +} + +_LIBCUDACXX_END_NAMESPACE_CUDA + +#include "detail/__pragma_pop" + +#endif //_CUDA_BARRIER diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/bit b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/bit new file mode 100644 index 000000000000..af80d4832c12 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/bit @@ -0,0 +1,24 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_BIT +#define _CUDA_BIT + +#include "detail/__config" + +#include "detail/__pragma_push" + +#include "cstdint" +#include "limits" +#include "type_traits" +#include "detail/libcxx/include/bit" + +#include "detail/__pragma_pop" + +#endif //_CUDA_BIT diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cassert b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cassert new file mode 100644 index 000000000000..96b5c6e8f679 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cassert @@ -0,0 +1,28 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_CASSERT +#define _CUDA_CASSERT + +#ifndef __CUDACC_RTC__ + +#include +#include + +#include "detail/__config" + +#include "detail/__pragma_push" + +#include "detail/libcxx/include/cassert" + +#include "detail/__pragma_pop" + +#endif //__CUDACC_RTC__ + +#endif //_CUDA_CASSERT diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/ccomplex b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/ccomplex new file mode 100644 index 000000000000..29de0d603972 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/ccomplex @@ -0,0 +1,14 @@ +//===----------------------------------------------------------------------===// +// +// Part of the CUDA Toolkit, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_CCOMPLEX +#define _CUDA_CCOMPLEX + +#include "complex" + +#endif //_CUDA_CCOMPLEX diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cfloat b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cfloat new file mode 100644 index 000000000000..ab752a097142 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cfloat @@ -0,0 +1,26 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_CFLOAT +#define _CUDA_CFLOAT + +#ifndef __CUDACC_RTC__ + #include + #include +#endif //__CUDACC_RTC__ + +#include "detail/__config" + +#include "detail/__pragma_push" + +#include "detail/libcxx/include/cfloat" + +#include "detail/__pragma_pop" + +#endif //_CUDA_CFLOAT diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/chrono b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/chrono new file mode 100644 index 000000000000..a3abd9ce7cb5 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/chrono @@ -0,0 +1,83 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _CUDA_CHRONO
+#define _CUDA_CHRONO
+
+#ifndef __CUDACC_RTC__
+    #include <chrono>
+#endif //__CUDACC_RTC__
+
+#include "ctime"
+#include "type_traits"
+#include "ratio"
+#include "limits"
+#include "version"
+
+#include "detail/__config"
+
+#include "detail/__pragma_push"
+
+// Silence NVCC warnings about `long double` arising from chrono floating
+// point user-defined literals which are defined in terms of `long double`.
+
+// FIXME: There is currently no way to disable this diagnostic in a fine-grained
+// fashion; if you include this header, the diagnostic will be suppressed
+// throughout the translation unit. The alternative is losing (conforming)
+// chrono user-defined literals; this seems like the lesser of two evils, so...
+#if defined(_LIBCUDACXX_COMPILER_NVCC)
+# if (CUDART_VERSION >= 11050)
+# pragma nv_diag_suppress cuda_demote_unsupported_floating_point
+# else
+# pragma diag_suppress cuda_demote_unsupported_floating_point
+# endif
+#endif
+
+#include "detail/libcxx/include/chrono"
+
+_LIBCUDACXX_BEGIN_NAMESPACE_STD
+
+namespace chrono {
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+system_clock::time_point system_clock::now() _NOEXCEPT
+{
+#ifdef __CUDA_ARCH__
+    uint64_t __time;
+    asm volatile("mov.u64 %0, %%globaltimer;":"=l"(__time)::);
+    return time_point(duration_cast<duration>(nanoseconds(__time)));
+#else
+    return time_point(duration_cast<duration>(nanoseconds(
+        ::std::chrono::duration_cast<::std::chrono::nanoseconds>(
+            ::std::chrono::system_clock::now().time_since_epoch()
+        ).count()
+    )));
+#endif
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+time_t system_clock::to_time_t(const system_clock::time_point& __t) _NOEXCEPT
+{
+    return time_t(duration_cast<seconds>(__t.time_since_epoch()).count());
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+system_clock::time_point system_clock::from_time_t(time_t __t) _NOEXCEPT
+{
+    return time_point(seconds(__t));
+}
+}
+
+_LIBCUDACXX_END_NAMESPACE_STD
+
+#include "detail/__pragma_pop"
+
+#endif //_CUDA_CHRONO
+
+
diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/climits b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/climits
new file mode 100644
index 000000000000..7c52a91b5ae2
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/climits
@@ -0,0 +1,106 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of libcu++, the C++ Standard Library for your entire system,
+// under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_CLIMITS +#define _CUDA_CLIMITS + +#ifndef __CUDACC_RTC__ + #include + #include + #include +#else + #define CHAR_BIT 8 + + #define SCHAR_MIN (-128) + #define SCHAR_MAX 127 + #define UCHAR_MAX 255 + #define __CHAR_UNSIGNED__ ('\xff' > 0) // CURSED + #if __CHAR_UNSIGNED__ + #define CHAR_MIN 0 + #define CHAR_MAX UCHAR_MAX + #else + #define CHAR_MIN SCHAR_MIN + #define CHAR_MAX SCHAR_MAX + #endif + #define SHRT_MIN (-SHRT_MAX - 1) + #define SHRT_MAX 0x7fff + #define USHRT_MAX 0xffff + #define INT_MIN (-INT_MAX - 1) + #define INT_MAX 0x7fffffff + #define UINT_MAX 0xffffffff + #define LONG_MIN (-LONG_MAX - 1) + #ifdef __LP64__ + #define LONG_MAX LLONG_MAX + #define ULONG_MAX ULLONG_MAX + #else + #define LONG_MAX INT_MAX + #define ULONG_MAX UINT_MAX + #endif + #define LLONG_MIN (-LLONG_MAX - 1) + #define LLONG_MAX 0x7fffffffffffffff + #define ULLONG_MAX 0xffffffffffffffff + + #define __FLT_RADIX__ 2 + #define __FLT_MANT_DIG__ 24 + #define __FLT_DIG__ 6 + #define __FLT_MIN__ 1.17549435082228750796873653722224568e-38F + #define __FLT_MAX__ 3.40282346638528859811704183484516925e+38F + #define __FLT_EPSILON__ 1.19209289550781250000000000000000000e-7F + #define __FLT_MIN_EXP__ (-125) + #define __FLT_MIN_10_EXP__ (-37) + #define __FLT_MAX_EXP__ 128 + #define __FLT_MAX_10_EXP__ 38 + #define __FLT_DENORM_MIN__ 1.40129846432481707092372958328991613e-45F + #define __DBL_MANT_DIG__ 53 + #define __DBL_DIG__ 15 + #define __DBL_MIN__ 2.22507385850720138309023271733240406e-308 + #define __DBL_MAX__ 1.79769313486231570814527423731704357e+308 + #define __DBL_EPSILON__ 2.22044604925031308084726333618164062e-16 + #define __DBL_MIN_EXP__ (-1021) + #define __DBL_MIN_10_EXP__ (-307) + #define __DBL_MAX_EXP__ 1024 + #define __DBL_MAX_10_EXP__ 308 + #define __DBL_DENORM_MIN__ 4.94065645841246544176568792868221372e-324 + + template + static __device__ __forceinline__ + _To __cowchild_cast(_From __from) + { + static_assert(sizeof(_From) == sizeof(_To), ""); + union __cast { _From __from; _To __to; }; + __cast __c; + __c.__from = __from; + return __c.__to; + } + + #define __builtin_huge_valf() __cowchild_cast(0x7f800000) + #define __builtin_nanf(__dummy) __cowchild_cast(0x7fc00000) + #define __builtin_nansf(__dummy) __cowchild_cast(0x7fa00000) + #define __builtin_huge_val() __cowchild_cast(0x7ff0000000000000) + #define __builtin_nan(__dummy) __cowchild_cast(0x7ff8000000000000) + #define __builtin_nans(__dummy) __cowchild_cast(0x7ff4000000000000) +#endif //__CUDACC_RTC__ + +#include "detail/__config" + +#include "detail/__pragma_push" + +#include "detail/libcxx/include/climits" + +// ICC defines __CHAR_BIT__ by default +// accept that, but assert it is what we expect +#ifdef __CHAR_BIT__ + static_assert(__CHAR_BIT__ == 8, ""); +#else + #define __CHAR_BIT__ 8 +#endif + +#include "detail/__pragma_pop" + +#endif //_CUDA_CLIMITS diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cmath b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cmath new file mode 100644 index 000000000000..a186b93ceebf --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cmath @@ -0,0 +1,27 @@ +//===----------------------------------------------------------------------===// +// +// Part of the CUDA Toolkit, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_CMATH +#define _CUDA_CMATH + +#include "limits" +#include "type_traits" + +#include "detail/__config" + +#include "detail/__pragma_push" + +#ifndef _LIBCUDACXX_COMPILER_NVRTC +#include +#endif +#include "detail/libcxx/include/cmath" + +#include "detail/__pragma_pop" + +#endif //_CUDA_CMATH + diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/complex b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/complex new file mode 100644 index 000000000000..69ec036d2489 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/complex @@ -0,0 +1,27 @@ +//===----------------------------------------------------------------------===// +// +// Part of the CUDA Toolkit, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_COMPLEX +#define _CUDA_COMPLEX + +#include "cmath" +#include "cstdint" +#include "climits" +#include "type_traits" + +#include "detail/__config" + +#include "detail/__pragma_push" + +#include "detail/libcxx/include/complex" + +#include "detail/__pragma_pop" + +#endif //_CUDA_COMPLEX + + diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cstddef b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cstddef new file mode 100644 index 000000000000..63a9dada91ed --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cstddef @@ -0,0 +1,40 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_CSTDDEF +#define _CUDA_CSTDDEF + +#ifndef __CUDACC_RTC__ + #include + #include +#else + #define offsetof(type, member) (cuda::std::size_t)((char*)&(((type *)0)->member) - (char*)0) +#endif //__CUDACC_RTC__ + +#include "version" + +#include "detail/__config" + +#include "detail/__pragma_push" + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +typedef decltype(nullptr) nullptr_t; + +_LIBCUDACXX_END_NAMESPACE_STD + +#include "detail/libcxx/include/cstddef" + +#if _LIBCUDACXX_STD_VER > 14 + #include "type_traits" +#endif + +#include "detail/__pragma_pop" + +#endif //_CUDA_CSTDDEF diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cstdint b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cstdint new file mode 100644 index 000000000000..e95accd4a4e0 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/cstdint @@ -0,0 +1,90 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_CSTDINT +#define _CUDA_CSTDINT + +#ifndef __CUDACC_RTC__ + #include + #include +#else + typedef signed char int8_t; + typedef unsigned char uint8_t; + typedef signed short int16_t; + typedef unsigned short uint16_t; + typedef signed int int32_t; + typedef unsigned int uint32_t; + typedef signed long long int64_t; + typedef unsigned long long uint64_t; + +#define _LIBCUDACXX_ADDITIONAL_INTS(N) \ + typedef int##N##_t int_fast##N##_t; \ + typedef uint##N##_t uint_fast##N##_t; \ + typedef int##N##_t int_least##N##_t; \ + typedef uint##N##_t uint_least##N##_t + + _LIBCUDACXX_ADDITIONAL_INTS(8); + _LIBCUDACXX_ADDITIONAL_INTS(16); + _LIBCUDACXX_ADDITIONAL_INTS(32); + _LIBCUDACXX_ADDITIONAL_INTS(64); +#undef _LIBCUDACXX_ADDITIONAL_INTS + + typedef int64_t intptr_t; + typedef uint64_t uintptr_t; + typedef int64_t intmax_t; + typedef uint64_t uintmax_t; + + #define INT8_MIN SCHAR_MIN + #define INT16_MIN SHRT_MIN + #define INT32_MIN INT_MIN + #define INT64_MIN LLONG_MIN + #define INT8_MAX SCHAR_MAX + #define INT16_MAX SHRT_MAX + #define INT32_MAX INT_MAX + #define INT64_MAX LLONG_MAX + #define UINT8_MAX UCHAR_MAX + #define UINT16_MAX USHRT_MAX + #define UINT32_MAX UINT_MAX + #define UINT64_MAX ULLONG_MAX + #define INT_FAST8_MIN SCHAR_MIN + #define INT_FAST16_MIN SHRT_MIN + #define INT_FAST32_MIN INT_MIN + #define INT_FAST64_MIN LLONG_MIN + #define INT_FAST8_MAX SCHAR_MAX + #define INT_FAST16_MAX SHRT_MAX + #define INT_FAST32_MAX INT_MAX + #define INT_FAST64_MAX LLONG_MAX + #define UINT_FAST8_MAX UCHAR_MAX + #define UINT_FAST16_MAX USHRT_MAX + #define UINT_FAST32_MAX UINT_MAX + #define UINT_FAST64_MAX ULLONG_MAX + + #define INT8_C(X) ((int_least8_t)(X)) + #define INT16_C(X) ((int_least16_t)(X)) + #define INT32_C(X) ((int_least32_t)(X)) + #define INT64_C(X) ((int_least64_t)(X)) + #define UINT8_C(X) ((uint_least8_t)(X)) + #define UINT16_C(X) ((uint_least16_t)(X)) + #define UINT32_C(X) ((uint_least32_t)(X)) + #define UINT64_C(X) ((uint_least64_t)(X)) + #define INTMAX_C(X) ((intmax_t)(X)) + #define UINTMAX_C(X) ((uintmax_t)(X)) +#endif //__CUDACC_RTC__ + +#include "version" +#include "climits" +#include "detail/__config" + +#include "detail/__pragma_push" + +#include "detail/libcxx/include/cstdint" + +#include "detail/__pragma_pop" + +#endif //_CUDA_CSTDINT diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/ctime b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/ctime new file mode 100644 index 000000000000..1825c373bbb3 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/ctime @@ -0,0 +1,27 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA_CTIME +#define _CUDA_CTIME + +#ifndef __CUDACC_RTC__ + #include +#else + typedef long long int time_t; +#endif + +#include "detail/__config" + +#include "detail/__pragma_push" + +#include "detail/libcxx/include/ctime" + +#include "detail/__pragma_pop" + +#endif //_CUDA_CTIME diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__access_property b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__access_property new file mode 100644 index 000000000000..d07a4bab3721 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__access_property @@ -0,0 +1,323 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * NVIDIA SOFTWARE LICENSE + * + * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + * + * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. + * + * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. + * + * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. + * + * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: + * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. + * + * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: + * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. 
You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * + * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. + * + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. + * + * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. + * + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. + * + * 8. NO WARRANTIES. 
THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * + * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + * + * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. + * + * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. + * + * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. 
You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. + * + * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * + * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. + * + * (v. August 20, 2021) + */ + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + +namespace __detail_ap { + + __host__ __device__ + constexpr uint32_t __ap_floor_log2(uint32_t __x) { + return (__x == 1 | __x == 0) ? 0 : 1 + __ap_floor_log2(__x >> 1); + } + + __host__ __device__ + constexpr uint32_t __ap_ceil_log2(uint32_t __x) { + return (__x == 1 | __x == 0) ? 0 : __ap_floor_log2(__x - 1) + 1; + } + + __host__ __device__ + constexpr uint32_t __ap_min(uint32_t __a, uint32_t __b) noexcept { + return (__a < __b) ? __a : __b; + } + + __host__ __device__ + constexpr uint32_t __ap_max(uint32_t __a, uint32_t __b) noexcept { + return (__a > __b) ? 
__a : __b; + } + +// GCC-7 and below do not properly assume the required number of bits for enumerations +#if defined(__GNUC__) && __GNUC__ < 8 +# define _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION +#else +# define _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION : uint64_t +#endif + + namespace __sm_80 { + namespace __off { + enum __l2_cop_off_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION { + _L2_EVICT_NORMAL = 0, + _L2_EVICT_FIRST = 1, + }; + } // namespace __off + + namespace __on { + enum __l2_cop_on_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION { + _L2_EVICT_NORMAL = 0, + _L2_EVICT_FIRST = 1, + _L2_EVICT_LAST = 2, + _L2_EVICT_NORMAL_DEMOTE = 3, + }; + } // namespace __on + + enum __l2_descriptor_mode_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION { + _DESC_IMPLICIT = 0, + _DESC_INTERLEAVED = 2, + _DESC_BLOCK_TYPE = 3, + }; + + enum __l2_eviction_max_way_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION { + _CUDA_AMPERE_MAX_L2_WAYS = std::uint32_t{16}, + }; + + enum __block_size_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION { + _BLOCKSIZE_4K = 0, + _BLOCKSIZE_8K = 1, + _BLOCKSIZE_16K = 2, + _BLOCKSIZE_32K = 3, + _BLOCKSIZE_64K = 4, + _BLOCKSIZE_128K = 5, + _BLOCKSIZE_256K = 6, + _BLOCKSIZE_512K = 7, + _BLOCKSIZE_1M = 8, + _BLOCKSIZE_2M = 9, + _BLOCKSIZE_4M = 10, + _BLOCKSIZE_8M = 11, + _BLOCKSIZE_16M = 12, + _BLOCKSIZE_32M = 13, + }; + + struct __block_desc_t { + uint64_t __ap_reserved : 37; + uint64_t __block_count: 7; + uint64_t __block_start: 7; + uint64_t __ap_reserved2 : 1; + __block_size_t __block_size : 4; + __off::__l2_cop_off_t __l2_cop_off : 1; + __on::__l2_cop_on_t __l2_cop_on : 2; + __l2_descriptor_mode_t __l2_descriptor_mode : 2; + uint64_t __l1_inv_dont_allocate : 1; + uint64_t __l2_sector_promote_256B : 1; + uint64_t __ap_reserved3 : 1; + + __host__ __device__ + constexpr std::uint64_t __get_descriptor_cexpr() const noexcept { + return + std::uint64_t(__ap_reserved) << 0 | + std::uint64_t(__block_count) << 37 | + std::uint64_t(__block_start) << 44 | + std::uint64_t(__ap_reserved2) << 51 | + std::uint64_t(__block_size) << 52 | + std::uint64_t(__l2_cop_off) << 56 | + std::uint64_t(__l2_cop_on) << 57 | + std::uint64_t(__l2_descriptor_mode) << 59 | + std::uint64_t(__l1_inv_dont_allocate) << 61 | + std::uint64_t(__l2_sector_promote_256B) << 62 | + std::uint64_t(__ap_reserved3) << 63; + } + + inline + __host__ __device__ + std::uint64_t __get_descriptor_non_cexpr() const noexcept { return *reinterpret_cast(this); } + + __host__ __device__ + constexpr std::uint64_t __get_descriptor() const noexcept { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) + return cuda::std::is_constant_evaluated() ? 
+ __get_descriptor_cexpr() : + __get_descriptor_non_cexpr(); +#else + return __get_descriptor_cexpr(); +#endif + } + }; + static_assert(sizeof(__block_desc_t) == 8, "__block_desc_t should be 8 bytes"); + static_assert(sizeof(__block_desc_t) == sizeof(std::uint64_t), ""); + static_assert( + __block_desc_t{(uint64_t)1, (uint64_t)1, (uint64_t)1, (uint64_t)1, __block_size_t::_BLOCKSIZE_8K, __off::_L2_EVICT_FIRST, __on::_L2_EVICT_FIRST, __l2_descriptor_mode_t::_DESC_INTERLEAVED, (uint64_t)1, (uint64_t)1, (uint64_t)1}.__get_descriptor() + == 0xF318102000000001, ""); + + /* Factory like struct to build a __block_desc_t due to constexpr C++11 + */ + struct __block_descriptor_builder { //variable declaration order matters == usage order + std::uint32_t __offset; + __block_size_t __block_size; + std::uint32_t __block_start, __end_hit; + std::uint32_t __block_count; + __off::__l2_cop_off_t __l2_cop_off; + __on::__l2_cop_on_t __l2_cop_on; + __l2_descriptor_mode_t __l2_descriptor_mode; + bool __l1_inv_dont_allocate, __l2_sector_promote_256B; + + __host__ __device__ static constexpr std::uint32_t __calc_offset(std::size_t __total_bytes) { + return __ap_max(std::uint32_t{12}, static_cast(__ap_ceil_log2(static_cast(__total_bytes))) - std::uint32_t{7}); + } + + __host__ __device__ static constexpr std::uint32_t __calc_block_start(std::uintptr_t __ptr, std::size_t __total_bytes) { + return static_cast(__ptr >> __calc_offset(static_cast(__total_bytes))); + } + + __host__ __device__ static constexpr std::uint32_t __calc_end_hit(std::uintptr_t __ptr, std::size_t __hit_bytes, std::size_t __total_bytes) { + return static_cast((__ptr + __hit_bytes + (std::uintptr_t{1} << (__calc_offset(static_cast(__total_bytes)))) - 1) >> __calc_offset(static_cast(__total_bytes))); + } + + __host__ __device__ constexpr __block_descriptor_builder(std::uintptr_t __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, __on::__l2_cop_on_t __hit_prop, __off::__l2_cop_off_t __miss_prop) + : __offset(__calc_offset(__total_bytes)) + , __block_size(static_cast<__block_size_t>(__calc_offset(__total_bytes) - std::uint32_t{12})) + , __block_start(__calc_block_start(__ptr, __total_bytes)) + , __end_hit(__calc_end_hit(__ptr, __hit_bytes, __total_bytes)) + , __block_count(__calc_end_hit(__ptr, __hit_bytes, __total_bytes) - __calc_block_start(__ptr, __total_bytes)) + , __l2_cop_off(__miss_prop) + , __l2_cop_on(__hit_prop) + , __l2_descriptor_mode(_DESC_BLOCK_TYPE) + , __l1_inv_dont_allocate(false) + , __l2_sector_promote_256B(false) + {} + + __host__ __device__ + constexpr __block_desc_t __get_block() const noexcept { + return __block_desc_t { 0, __ap_min(std::uint32_t{0x7f}, __block_count), (__block_start & std::uint32_t{0x7f}), 0, __block_size, __l2_cop_off, __l2_cop_on, _DESC_BLOCK_TYPE, false, false, 0 }; + } + }; + static_assert(sizeof(std::uintptr_t) > 4, "std::uintptr_t needs at least 5 bytes for this code to work"); + + struct __interleave_descriptor_t { + uint64_t __ap_reserved : 52; + uint64_t __fraction : 4; + __off::__l2_cop_off_t __l2_cop_off : 1; + __on::__l2_cop_on_t __l2_cop_on : 2; + __l2_descriptor_mode_t __l2_descriptor_mode : 2; + uint64_t __l1_inv_dont_allocate : 1; + uint64_t __l2_sector_promote_256B : 1; + uint64_t __ap_reserved2 : 1; + + __host__ __device__ + constexpr __interleave_descriptor_t( + __on::__l2_cop_on_t __hit_prop, + std::uint32_t __hit_ratio, + __off::__l2_cop_off_t __miss_prop) noexcept + : __fraction(__hit_ratio), + __l2_cop_off(__miss_prop), + __l2_cop_on(__hit_prop), + 
__l2_descriptor_mode(_DESC_INTERLEAVED), + __l1_inv_dont_allocate(0x0), + __l2_sector_promote_256B(0x0), + __ap_reserved(0x0), + __ap_reserved2(0x0) {} + + __host__ __device__ + constexpr std::uint64_t __get_descriptor_cexpr() const { + return + std::uint64_t(__ap_reserved) << 0 | + std::uint64_t(__fraction) << 52 | + std::uint64_t(__l2_cop_off) << 56 | + std::uint64_t(__l2_cop_on) << 57 | + std::uint64_t(__l2_descriptor_mode) << 59 | + std::uint64_t(__l1_inv_dont_allocate) << 61 | + std::uint64_t(__l2_sector_promote_256B) << 62 | + std::uint64_t(__ap_reserved2) << 63; + } + + inline + __host__ __device__ + std::uint64_t __get_descriptor_non_cexpr() const noexcept { return *reinterpret_cast(this); } + + + __host__ __device__ + constexpr std::uint64_t __get_descriptor() const noexcept { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) + return cuda::std::is_constant_evaluated() ? + __get_descriptor_cexpr() : + __get_descriptor_non_cexpr(); +#else + return __get_descriptor_cexpr(); +#endif + } + }; + static_assert(sizeof(__interleave_descriptor_t) == 8, "__interleave_descriptor_t should be 8 bytes"); + static_assert(sizeof(__interleave_descriptor_t) == sizeof(std::uint64_t), ""); + + __host__ __device__ + static constexpr std::uint64_t __interleave_normal() noexcept { + return 0x10F0000000000000; + } + + __host__ __device__ + static constexpr std::uint64_t __interleave_streaming() noexcept { + return 0x12F0000000000000; + } + + __host__ __device__ + static constexpr std::uint64_t __interleave_persisting() noexcept { + return 0x14F0000000000000; + } + + __host__ __device__ + static constexpr std::uint64_t __interleave_normal_demote() noexcept { + return 0x16F0000000000000; + } + + } // namespace __sm_80 + + __host__ __device__ + constexpr std::uint64_t __interleave(cudaAccessProperty __hit_prop, float __hit_ratio, cudaAccessProperty __miss_prop = cudaAccessPropertyNormal) { + return __sm_80::__interleave_descriptor_t( + ((__hit_prop == cudaAccessPropertyNormal) ? __sm_80::__on::__l2_cop_on_t::_L2_EVICT_NORMAL_DEMOTE : static_cast<__sm_80::__on::__l2_cop_on_t>(__hit_prop)), + __ap_min((static_cast(__hit_ratio) * __sm_80::__l2_eviction_max_way_t::_CUDA_AMPERE_MAX_L2_WAYS), static_cast(__sm_80::__l2_eviction_max_way_t::_CUDA_AMPERE_MAX_L2_WAYS - 1)), + static_cast<__sm_80::__off::__l2_cop_off_t>(__miss_prop) + ).__get_descriptor(); + } + + __host__ __device__ + constexpr std::uint64_t __block(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, cudaAccessProperty __hit_prop, cudaAccessProperty __miss_prop = cudaAccessPropertyNormal) { + return (__total_bytes <= (size_t{0xFFFFFFFF}) & __total_bytes != 0 & __hit_bytes <= __total_bytes) ? __sm_80::__block_descriptor_builder( + reinterpret_cast(__ptr), + __hit_bytes, + __total_bytes, + (__hit_prop == cudaAccessPropertyNormal) ? 
__sm_80::__on::_L2_EVICT_NORMAL_DEMOTE : static_cast<__sm_80::__on::__l2_cop_on_t>(__hit_prop), + static_cast<__sm_80::__off::__l2_cop_off_t>(__miss_prop) + ).__get_block().__get_descriptor() + : __sm_80::__interleave_normal(); + } +} // namespace __detail_ap + +_LIBCUDACXX_END_NAMESPACE_CUDA diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__annotated_ptr b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__annotated_ptr new file mode 100644 index 000000000000..4c7ad4080615 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__annotated_ptr @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * NVIDIA SOFTWARE LICENSE + * + * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + * + * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. + * + * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. + * + * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. + * + * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: + * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. + * + * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: + * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. 
As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * + * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. + * + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. + * + * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. + * + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. + * + * 8. NO WARRANTIES. 
THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * + * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + * + * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. + * + * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. + * + * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. 
You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. + * + * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * + * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. + * + * (v. 
August 20, 2021) + */ + +_LIBCUDACXX_BEGIN_NAMESPACE_CUDA + +namespace __detail_ap { + + template <typename _Property> + __device__ + void* __associate_address_space(void* __ptr, _Property __prop) { + if (std::is_same<_Property, access_property::shared>::value == true) { + bool __b = __isShared(__ptr); +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_DEBUG_ASSERT(__b == true); +#endif + __builtin_assume(__b); + } else if (std::is_same<_Property, access_property::global>::value == true || + std::is_same<_Property, access_property::normal>::value == true || + std::is_same<_Property, access_property::persisting>::value == true || + std::is_same<_Property, access_property::streaming>::value == true || + std::is_same<_Property, access_property>::value) { + bool __b = __isGlobal(__ptr); +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_DEBUG_ASSERT(__b == true); +#endif + __builtin_assume(__b); + } + + return __ptr; + } + + template <typename __Prop> + __device__ + void* __associate_descriptor(void* __ptr, __Prop __prop) { + return __associate_descriptor(__ptr, static_cast<std::uint64_t>(access_property(__prop))); + } + + template <> + inline __device__ + void* __associate_descriptor(void* __ptr, std::uint64_t __prop) { +#if __CUDA_ARCH__ >= 800 + return __nv_associate_access_property(__ptr, __prop); +#else + return __ptr; +#endif + } + + template<> + inline __device__ + void* __associate_descriptor(void* __ptr, access_property::shared) { + return __ptr; + } + + template <typename _Type, typename _Property> + __host__ __device__ + _Type* __associate(_Type* __ptr, _Property __prop) { +#ifdef __CUDA_ARCH__ + return static_cast<_Type*>(__associate_descriptor( + __associate_address_space(const_cast<void*>(static_cast<const void*>(__ptr)), __prop), + __prop)); +#else + return __ptr; +#endif + } + + + template <typename _Property> + class __annotated_ptr_base { + using __error = typename _Property::__unknown_access_property_type; + }; + + template<> + class __annotated_ptr_base<access_property::shared> { + protected: + static constexpr std::uint64_t __prop = 0; + + constexpr __annotated_ptr_base() noexcept = default; + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + __host__ __device__ constexpr __annotated_ptr_base(access_property::shared) noexcept {} + inline __device__ void* __apply_prop(void* __p) const { + return __associate(__p, access_property::shared{}); + } + __host__ __device__ constexpr access_property::shared __get_property() const noexcept { + return access_property::shared{}; + } + }; + + template<> + class __annotated_ptr_base<access_property::global> { + protected: + static constexpr std::uint64_t __prop = __sm_80::__interleave_normal(); + + constexpr __annotated_ptr_base() noexcept = default; + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + __host__ __device__ constexpr __annotated_ptr_base(access_property::global) noexcept {} + inline __device__ void* __apply_prop(void* __p) const { + return __associate(__p, access_property::global{}); + } + __host__ __device__ constexpr access_property::global __get_property() const noexcept { + return access_property::global{}; + } + }; + + template<> + class __annotated_ptr_base<access_property::normal> { + protected: + static constexpr std::uint64_t __prop = __sm_80::__interleave_normal_demote(); + + constexpr __annotated_ptr_base() noexcept = default; + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + __host__ __device__ constexpr __annotated_ptr_base(access_property::normal) noexcept {} + inline __device__ void* __apply_prop(void* __p) const { + return __associate(__p, access_property::normal{}); + } + __host__ __device__ constexpr access_property::normal __get_property() const noexcept { + return access_property::normal{}; + } + }; + + template<> + class __annotated_ptr_base<access_property::persisting> { + protected: + static constexpr std::uint64_t __prop = __sm_80::__interleave_persisting(); + + constexpr __annotated_ptr_base() noexcept = default; + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + __host__ __device__ constexpr __annotated_ptr_base(access_property::persisting) noexcept {} + inline __device__ void* __apply_prop(void* __p) const { + return __associate(__p, access_property::persisting{}); + } + __host__ __device__ constexpr access_property::persisting __get_property() const noexcept { + return access_property::persisting{}; + } + }; + + template<> + class __annotated_ptr_base<access_property::streaming> { + protected: + static constexpr std::uint64_t __prop = __sm_80::__interleave_streaming(); + + constexpr __annotated_ptr_base() noexcept = default; + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + __host__ __device__ constexpr __annotated_ptr_base(access_property::streaming) noexcept {} + inline __device__ void* __apply_prop(void* __p) const { + return __associate(__p, access_property::streaming{}); + } + __host__ __device__ constexpr access_property::streaming __get_property() const noexcept { + return access_property::streaming{}; + } + }; + + template<> + class __annotated_ptr_base<access_property> { + protected: + std::uint64_t __prop; + + __host__ __device__ constexpr __annotated_ptr_base() noexcept : __prop(access_property()) {} + __host__ __device__ constexpr __annotated_ptr_base(std::uint64_t __property) noexcept : __prop(__property) {} + __host__ __device__ constexpr __annotated_ptr_base(access_property __property) noexcept + : __annotated_ptr_base(static_cast<std::uint64_t>(__property)) {} + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + inline __device__ void* __apply_prop(void* __p) const { + return __associate(__p, __prop); + } + __host__ __device__ access_property __get_property() const noexcept { + return reinterpret_cast<access_property&>(const_cast<std::uint64_t&>(__prop)); + } + }; +} // namespace __detail_ap + +_LIBCUDACXX_END_NAMESPACE_CUDA diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__config b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__config new file mode 100644 index 000000000000..039911f2e2ce --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__config @@ -0,0 +1,196 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef __cuda_std__ +#define __cuda_std__ + +#ifdef __CUDACC__ + #if defined(__clang__) + #include + #define __fp16 __half + #endif + #if defined(__FLT16_MANT_DIG__) + #include + #define _Float16 __half + #endif + #define _LIBCUDACXX_CUDACC_VER_MAJOR __CUDACC_VER_MAJOR__ + #define _LIBCUDACXX_CUDACC_VER_MINOR __CUDACC_VER_MINOR__ + #define _LIBCUDACXX_CUDACC_VER_BUILD __CUDACC_VER_BUILD__ + #define _LIBCUDACXX_CUDACC_VER \ + _LIBCUDACXX_CUDACC_VER_MAJOR * 100000 + _LIBCUDACXX_CUDACC_VER_MINOR * 1000 + \ + _LIBCUDACXX_CUDACC_VER_BUILD + + #define _LIBCUDACXX_HAS_NO_LONG_DOUBLE +#else + #ifndef __host__ + #define __host__ + #endif + #ifndef __device__ + #define __device__ + #endif + #ifndef __forceinline__ + #define __forceinline__ + #endif +#endif + +#ifdef _MSC_VER + #undef __cpp_lib_transparent_operators +#endif + +// request these outcomes +#define _LIBCUDACXX_NO_AUTO_LINK +#ifndef _LIBCUDACXX_NO_EXCEPTIONS + #define _LIBCUDACXX_NO_EXCEPTIONS +#endif +#ifndef _LIBCUDACXX_NO_RTTI + #define _LIBCUDACXX_NO_RTTI +#endif +// Disable the nodebug type +#ifndef _LIBCUDACXX_NODEBUG_TYPE + #define _LIBCUDACXX_NODEBUG_TYPE +#endif + +#if defined(__CUDACC_RTC__) +# if defined(__CUDACC_RTC_INT128__) +# define __SIZEOF_INT128__ 16 // This macro is required in order to use int128 within the library +# else +# define _LIBCUDACXX_HAS_NO_INT128 +# endif +#endif + +#if defined(_LIBCUDACXX_COMPILER_MSVC) || (defined(_LIBCUDACXX_CUDACC_VER) && (_LIBCUDACXX_CUDACC_VER < 1105000)) +# define _LIBCUDACXX_HAS_NO_INT128 +#endif + +#ifndef _LIBCUDACXX_ASSERT +# define _LIBCUDACXX_ASSERT(x, m) ((void)0) +#endif +#define _LIBCUDACXX_FREESTANDING +#define _LIBCUDACXX_HAS_NO_WCHAR_H +#define _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE +#define _LIBCUDACXX_HAS_NO_PLATFORM_WAIT +#define _LIBCUDACXX_HAS_NO_MONOTONIC_CLOCK +#define _LIBCUDACXX_HAS_NO_TREE_BARRIER +#ifdef __CUDACC_RTC__ + #define __ELF__ + #define _LIBCUDACXX_DISABLE_PRAGMA_GCC_SYSTEM_HEADER + #define _LIBCUDACXX_HAS_THREAD_API_EXTERNAL + #define __alignof(x) alignof(x) + #define _LIBCUDACXX_LITTLE_ENDIAN + #define _LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS + #define _LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO +#endif +#define _LIBCUDACXX_HAS_EXTERNAL_ATOMIC_IMP +#define _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(size, ptr) (size <= 8) +#define _LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS + +#define _LIBCUDACXX_SYS_CLOCK_DURATION nanoseconds + +#if defined(__PGIC__) && defined(__linux__) + #define __ELF__ +#endif + +#define _LIBCUDACXX_ENABLE_BIT_BACKPORT + +#define _LIBCUDACXX_HAS_CUDA_ATOMIC_EXT + +#include "libcxx/include/__config" + +#if defined(__CUDA_ARCH__) + #define _LIBCUDACXX_HAS_THREAD_API_CUDA +#elif defined(_LIBCUDACXX_COMPILER_MSVC) + #define _LIBCUDACXX_HAS_THREAD_API_WIN32 +#endif + +#if _GNUC_VER <= 409 + #define _LIBCUDACXX_CUDA_HAS_NO_HOST_STD_ATOMIC_INIT +#endif + +// force this outcome +#undef _LIBCUDACXX_EXECUTION_SPACE_SPECIFIER +#define _LIBCUDACXX_EXECUTION_SPACE_SPECIFIER __host__ __device__ +#undef _LIBCUDACXX_ATOMIC_FLAG_TYPE +#define _LIBCUDACXX_ATOMIC_FLAG_TYPE int +#undef _LIBCUDACXX_INLINE_VISIBILITY +#define _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_EXECUTION_SPACE_SPECIFIER +#undef _LIBCUDACXX_FUNC_VIS +#define _LIBCUDACXX_FUNC_VIS _LIBCUDACXX_INLINE_VISIBILITY +#undef _LIBCUDACXX_TYPE_VIS +#define _LIBCUDACXX_TYPE_VIS + +#ifndef _LIBCUDACXX_ABI_UNSTABLE +# define _LIBCUDACXX_ABI_UNSTABLE 
+#endif + +#define _LIBCUDACXX_CUDA_API_VERSION 1008001 + +#define _LIBCUDACXX_CUDA_API_VERSION_MAJOR \ + (_LIBCUDACXX_CUDA_API_VERSION / 1000000) + +#define _LIBCUDACXX_CUDA_API_VERSION_MINOR \ + (_LIBCUDACXX_CUDA_API_VERSION / 1000 % 1000) + +#define _LIBCUDACXX_CUDA_API_VERSION_PATCH \ + (_LIBCUDACXX_CUDA_API_VERSION % 1000) + +#ifndef _LIBCUDACXX_CUDA_ABI_VERSION_LATEST +# define _LIBCUDACXX_CUDA_ABI_VERSION_LATEST 4 +#endif + +#ifdef _LIBCUDACXX_CUDA_ABI_VERSION +# if _LIBCUDACXX_CUDA_ABI_VERSION != 2 && _LIBCUDACXX_CUDA_ABI_VERSION != 3 && _LIBCUDACXX_CUDA_ABI_VERSION != 4 +# error Unsupported libcu++ ABI version requested. Please define _LIBCUDACXX_CUDA_ABI_VERSION to 2, 3, or 4. +# endif +#else +# define _LIBCUDACXX_CUDA_ABI_VERSION _LIBCUDACXX_CUDA_ABI_VERSION_LATEST +#endif + +#ifdef _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION +# if _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION != _LIBCUDACXX_CUDA_ABI_VERSION +# error cuda_pipeline.h has assumed a different libcu++ ABI version than provided by this library. To fix this, please include a libcu++ header before including cuda_pipeline.h, or upgrade to a version of the toolkit this version of libcu++ shipped in. +# endif +#endif + +#ifndef _LIBCUDACXX_CUDA_ABI_NAMESPACE +# define _LIBCUDACXX_CUDA_ABI_NAMESPACE _LIBCUDACXX_CONCAT(__,_LIBCUDACXX_CUDA_ABI_VERSION) +#endif + +#ifndef _LIBCUDACXX_BEGIN_NAMESPACE_CUDA +# define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA \ + namespace cuda { inline namespace _LIBCUDACXX_CUDA_ABI_NAMESPACE { +#endif +#ifndef _LIBCUDACXX_END_NAMESPACE_CUDA +# define _LIBCUDACXX_END_NAMESPACE_CUDA } } +#endif + +#ifndef _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE +# define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE \ + namespace cuda { namespace device { inline namespace _LIBCUDACXX_CUDA_ABI_NAMESPACE { +#endif +#ifndef _LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE +# define _LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE } } } +#endif + +// redefine namespace std:: +#undef _LIBCUDACXX_BEGIN_NAMESPACE_STD +#define _LIBCUDACXX_BEGIN_NAMESPACE_STD \ + namespace cuda { namespace std { inline namespace _LIBCUDACXX_CUDA_ABI_NAMESPACE { + +#undef _LIBCUDACXX_END_NAMESPACE_STD +#define _LIBCUDACXX_END_NAMESPACE_STD } } } + +#undef _CUDA_VSTD +#define _CUDA_VSTD cuda::std::_LIBCUDACXX_CUDA_ABI_NAMESPACE + +#undef _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION +#define _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION namespace cuda { namespace std { +#undef _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION +#define _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION } } + +#endif //__cuda_std__ diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__functional_base b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__functional_base new file mode 100644 index 000000000000..731645d0066b --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__functional_base @@ -0,0 +1,25 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA___FUNCTIONAL_BASE +#define _CUDA___FUNCTIONAL_BASE + +#ifndef __CUDACC_RTC__ + #include +#endif + +#include "../chrono" +#include "../climits" + +#include "__config" + +#include "libcxx/include/__functional_base" + +#endif //_CUDA___FUNCTIONAL_BASE + diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__pragma_pop b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__pragma_pop new file mode 100644 index 000000000000..a40f5f2bae21 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__pragma_pop @@ -0,0 +1,11 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "libcxx/include/__pragma_pop" + diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__pragma_push b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__pragma_push new file mode 100644 index 000000000000..e2aae6e48ccb --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__pragma_push @@ -0,0 +1,12 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "libcxx/include/__pragma_push" +#include "libcxx/include/__undef_macros" + diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__threading_support b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__threading_support new file mode 100644 index 000000000000..0216c5e8f348 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/__threading_support @@ -0,0 +1,26 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _CUDA___THREADING_SUPPORT +#define _CUDA___THREADING_SUPPORT + +#ifndef __CUDACC_RTC__ + #include + #include +#endif + + +#include "../chrono" +#include "../climits" + +#include "__config" + +#include "libcxx/include/__threading_support" + +#endif //_CUDA___THREADING_SUPPORT diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/CMakeLists.txt b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/CMakeLists.txt new file mode 100644 index 000000000000..d59bf2b1c1ed --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/CMakeLists.txt @@ -0,0 +1,277 @@ +set(files + __bit_reference + __bsd_locale_defaults.h + __bsd_locale_fallbacks.h + __errc + __debug + __functional_03 + __functional_base + __functional_base_03 + __hash_table + __libcpp_version + __locale + __mutex_base + __node_handle + __nullptr + __split_buffer + __sso_allocator + __std_stream + __string + __threading_support + __tree + __tuple + __undef_macros + algorithm + any + array + atomic + barrier + bit + bitset + cassert + ccomplex + cctype + cerrno + cfenv + cfloat + charconv + chrono + cinttypes + ciso646 + climits + clocale + cmath + codecvt + compare + complex + complex.h + condition_variable + csetjmp + csignal + cstdarg + cstdbool + cstddef + cstdint + cstdio + cstdlib + cstring + ctgmath + ctime + ctype.h + cwchar + cwctype + deque + errno.h + exception + execution + experimental/__config + experimental/__memory + experimental/algorithm + experimental/coroutine + experimental/deque + experimental/filesystem + experimental/forward_list + experimental/functional + experimental/iterator + experimental/list + experimental/map + experimental/memory_resource + experimental/propagate_const + experimental/regex + experimental/set + experimental/simd + experimental/string + experimental/type_traits + experimental/unordered_map + experimental/unordered_set + experimental/utility + experimental/vector + ext/__hash + ext/hash_map + ext/hash_set + fenv.h + filesystem + float.h + forward_list + fstream + functional + future + initializer_list + inttypes.h + iomanip + ios + iosfwd + iostream + istream + iterator + latch + limits + limits.h + list + locale + locale.h + map + math.h + memory + module.modulemap + mutex + new + numeric + optional + ostream + queue + random + ratio + regex + scoped_allocator + semaphore + set + setjmp.h + shared_mutex + span + sstream + stack + stdbool.h + stddef.h + stdexcept + stdint.h + stdio.h + stdlib.h + streambuf + string + string.h + string_view + strstream + system_error + tgmath.h + thread + tuple + type_traits + typeindex + typeinfo + unordered_map + unordered_set + utility + valarray + variant + vector + version + wchar.h + wctype.h + ) + +if(LIBCXX_INSTALL_SUPPORT_HEADERS) + set(files + ${files} + support/android/locale_bionic.h + support/fuchsia/xlocale.h + support/ibm/limits.h + support/ibm/locale_mgmt_aix.h + support/ibm/support.h + support/ibm/xlocale.h + support/musl/xlocale.h + support/newlib/xlocale.h + support/solaris/floatingpoint.h + support/solaris/wchar.h + support/solaris/xlocale.h + support/win32/limits_msvc_win32.h + support/win32/locale_win32.h + support/xlocale/__nop_locale_mgmt.h + support/xlocale/__posix_l_fallback.h + 
support/xlocale/__strtonum_fallback.h + ) +endif() + +if (LIBCXX_NEEDS_SITE_CONFIG) + # Generate a custom __config header. The new header is created + # by prepending __config_site to the current __config header. + add_custom_command(OUTPUT ${LIBCXX_BINARY_DIR}/__generated_config + COMMAND ${PYTHON_EXECUTABLE} ${LIBCXX_SOURCE_DIR}/utils/cat_files.py + ${LIBCXX_BINARY_DIR}/__config_site + ${LIBCXX_SOURCE_DIR}/include/__config + -o ${LIBCXX_BINARY_DIR}/__generated_config + DEPENDS ${LIBCXX_SOURCE_DIR}/include/__config + ${LIBCXX_BINARY_DIR}/__config_site + ) + # Add a target that executes the generation commands. + add_custom_target(cxx-generated-config ALL + DEPENDS ${LIBCXX_BINARY_DIR}/__generated_config) + set(generated_config_deps cxx-generated-config) +else() + set(files + ${files} + __config + ) +endif() + +# In some build configurations (like bootstrapping clang), we need to be able to +# install the libcxx headers before CMake configuration for libcxx runs. Making +# the name of this target configurable allows LLVM/runtimes/CMakeLists.txt to +# add this subdirectory to the LLVM build to put libcxx's headers in place +# before libcxx's build configuration is run. +if (NOT CXX_HEADER_TARGET) + set(CXX_HEADER_TARGET cxx-headers) +endif() +if(NOT LIBCXX_USING_INSTALLED_LLVM AND LIBCXX_HEADER_DIR) + set(output_dir ${LIBCXX_HEADER_DIR}/include/c++/v1) + + set(out_files) + foreach(f ${files}) + set(src ${CMAKE_CURRENT_SOURCE_DIR}/${f}) + set(dst ${output_dir}/${f}) + add_custom_command(OUTPUT ${dst} + DEPENDS ${src} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src} ${dst} + COMMENT "Copying CXX header ${f}") + list(APPEND out_files ${dst}) + endforeach() + + if (LIBCXX_NEEDS_SITE_CONFIG) + # Copy the generated header as __config into build directory. + set(src ${LIBCXX_BINARY_DIR}/__generated_config) + set(dst ${output_dir}/__config) + add_custom_command(OUTPUT ${dst} + DEPENDS ${src} ${generated_config_deps} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src} ${dst} + COMMENT "Copying CXX __config") + list(APPEND out_files ${dst}) + endif() + + add_custom_target(${CXX_HEADER_TARGET} ALL DEPENDS ${out_files} ${LIBCXX_CXX_ABI_HEADER_TARGET}) +else() + add_custom_target(${CXX_HEADER_TARGET}) +endif() +set_target_properties(${CXX_HEADER_TARGET} PROPERTIES FOLDER "Misc") + +if (LIBCXX_INSTALL_HEADERS) + foreach(file ${files}) + get_filename_component(dir ${file} DIRECTORY) + install(FILES ${file} + DESTINATION ${LIBCXX_INSTALL_HEADER_PREFIX}include/c++/v1/${dir} + COMPONENT ${CXX_HEADER_TARGET} + PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ + ) + endforeach() + + if (LIBCXX_NEEDS_SITE_CONFIG) + # Install the generated header as __config.
+ install(FILES ${LIBCXX_BINARY_DIR}/__generated_config + DESTINATION ${LIBCXX_INSTALL_HEADER_PREFIX}include/c++/v1 + PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ + RENAME __config + COMPONENT ${CXX_HEADER_TARGET}) + endif() + + if (NOT CMAKE_CONFIGURATION_TYPES) + add_custom_target(install-${CXX_HEADER_TARGET} + DEPENDS ${CXX_HEADER_TARGET} ${generated_config_deps} + COMMAND "${CMAKE_COMMAND}" + -DCMAKE_INSTALL_COMPONENT=${CXX_HEADER_TARGET} + -P "${CMAKE_BINARY_DIR}/cmake_install.cmake") + # Stripping is a no-op for headers + add_custom_target(install-${CXX_HEADER_TARGET}-stripped DEPENDS install-${CXX_HEADER_TARGET}) + + add_custom_target(install-libcxx-headers DEPENDS install-${CXX_HEADER_TARGET}) + add_custom_target(install-libcxx-headers-stripped DEPENDS install-${CXX_HEADER_TARGET}-stripped) + endif() +endif() diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bit_reference b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bit_reference new file mode 100644 index 000000000000..851cbeacfc59 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bit_reference @@ -0,0 +1,1289 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___BIT_REFERENCE +#define _LIBCUDACXX___BIT_REFERENCE + +#include <__config> +#include <algorithm> +#include <cstring> + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_PUSH_MACROS +#include <__undef_macros> + + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +template <class _Cp, bool _IsConst> class __bit_iterator; +template <class _Cp> class __bit_const_reference; + +template <class _Tp> +struct __has_storage_type +{ + static const bool value = false; +}; + +template <class _Cp, bool = __has_storage_type<_Cp>::value> +class __bit_reference +{ + typedef typename _Cp::__storage_type __storage_type; + typedef typename _Cp::__storage_pointer __storage_pointer; + + __storage_pointer __seg_; + __storage_type __mask_; + + friend typename _Cp::__self; + + friend class __bit_const_reference<_Cp>; + friend class __bit_iterator<_Cp, false>; +public: + _LIBCUDACXX_INLINE_VISIBILITY + __bit_reference(const __bit_reference&) = default; + + _LIBCUDACXX_INLINE_VISIBILITY operator bool() const _NOEXCEPT + {return static_cast<bool>(*__seg_ & __mask_);} + _LIBCUDACXX_INLINE_VISIBILITY bool operator ~() const _NOEXCEPT + {return !static_cast<bool>(*this);} + + _LIBCUDACXX_INLINE_VISIBILITY + __bit_reference& operator=(bool __x) _NOEXCEPT + { + if (__x) + *__seg_ |= __mask_; + else + *__seg_ &= ~__mask_; + return *this; + } + + _LIBCUDACXX_INLINE_VISIBILITY + __bit_reference& operator=(const __bit_reference& __x) _NOEXCEPT + {return operator=(static_cast<bool>(__x));} + + _LIBCUDACXX_INLINE_VISIBILITY void flip() _NOEXCEPT {*__seg_ ^= __mask_;} + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, false> operator&() const _NOEXCEPT + {return __bit_iterator<_Cp, false>(__seg_, static_cast<unsigned>(__libcpp_ctz(__mask_)));} +private: + _LIBCUDACXX_INLINE_VISIBILITY + __bit_reference(__storage_pointer __s, __storage_type __m) _NOEXCEPT + : __seg_(__s), __mask_(__m) {} +}; + +template <class _Cp> +class __bit_reference<_Cp, false> +{ +}; +
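+// [Editor's illustration, not from upstream libc++.] __bit_reference above is
+// the proxy-reference pattern: a container of packed bits cannot return a
+// real bool& from operator[], so it hands out a small object holding a
+// pointer to the storage word (__seg_) plus a one-bit mask (__mask_). Reads
+// go through operator bool(), writes through operator=(bool). A minimal
+// self-contained sketch of the same idea, with hypothetical names:
+//
+//     struct __example_bit_ref {
+//         unsigned long long* __word;  // storage word containing the bit
+//         unsigned long long  __mask;  // single-bit mask selecting it
+//         operator bool() const { return (*__word & __mask) != 0; }
+//         __example_bit_ref& operator=(bool __x) {
+//             if (__x) *__word |= __mask;   // set the bit
+//             else     *__word &= ~__mask;  // clear the bit
+//             return *this;
+//         }
+//     };
+//
+//     // usage: unsigned long long __w = 0;
+//     //        __example_bit_ref __b{&__w, 1ull << 5};
+//     //        __b = true;   // __w is now 0x20
+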
+template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(__bit_reference<_Cp> __x, __bit_reference<_Cp> __y) _NOEXCEPT +{ + bool __t = __x; + __x = __y; + __y = __t; +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(__bit_reference<_Cp> __x, __bit_reference<_Dp> __y) _NOEXCEPT +{ + bool __t = __x; + __x = __y; + __y = __t; +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(__bit_reference<_Cp> __x, bool& __y) _NOEXCEPT +{ + bool __t = __x; + __x = __y; + __y = __t; +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(bool& __x, __bit_reference<_Cp> __y) _NOEXCEPT +{ + bool __t = __x; + __x = __y; + __y = __t; +} + +template +class __bit_const_reference +{ + typedef typename _Cp::__storage_type __storage_type; + typedef typename _Cp::__const_storage_pointer __storage_pointer; + + __storage_pointer __seg_; + __storage_type __mask_; + + friend typename _Cp::__self; + friend class __bit_iterator<_Cp, true>; +public: + _LIBCUDACXX_INLINE_VISIBILITY + __bit_const_reference(const __bit_const_reference&) = default; + + _LIBCUDACXX_INLINE_VISIBILITY + __bit_const_reference(const __bit_reference<_Cp>& __x) _NOEXCEPT + : __seg_(__x.__seg_), __mask_(__x.__mask_) {} + + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR operator bool() const _NOEXCEPT + {return static_cast(*__seg_ & __mask_);} + + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, true> operator&() const _NOEXCEPT + {return __bit_iterator<_Cp, true>(__seg_, static_cast(__libcpp_ctz(__mask_)));} +private: + _LIBCUDACXX_INLINE_VISIBILITY + _LIBCUDACXX_CONSTEXPR + __bit_const_reference(__storage_pointer __s, __storage_type __m) _NOEXCEPT + : __seg_(__s), __mask_(__m) {} + + __bit_const_reference& operator=(const __bit_const_reference&) = delete; +}; + +// find + +template +__bit_iterator<_Cp, _IsConst> +__find_bool_true(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) +{ + typedef __bit_iterator<_Cp, _IsConst> _It; + typedef typename _It::__storage_type __storage_type; + static const int __bits_per_word = _It::__bits_per_word; + // do first partial word + if (__first.__ctz_ != 0) + { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __storage_type __b = *__first.__seg_ & __m; + if (__b) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); + if (__n == __dn) + return __first + __n; + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) + if (*__first.__seg_) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(*__first.__seg_))); + // do last partial word + if (__n > 0) + { + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + __storage_type __b = *__first.__seg_ & __m; + if (__b) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); + } + return _It(__first.__seg_, static_cast(__n)); +} + +template +__bit_iterator<_Cp, _IsConst> +__find_bool_false(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) +{ + typedef __bit_iterator<_Cp, _IsConst> _It; + typedef typename _It::__storage_type __storage_type; + const int __bits_per_word = _It::__bits_per_word; + // do first partial word + if (__first.__ctz_ != 0) + { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + 
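+ // [Editor's note, not from upstream.] Head-word handling: __clz_f is the
+ // number of bit positions from the iterator's offset __ctz_ up to the end
+ // of this word. The next statements clamp the request to this word (__dn)
+ // and build a mask __m covering bit positions [__ctz_, __ctz_ + __dn).
+ // Worked example for a 64-bit word with __ctz_ == 5 and __n == 10:
+ // __clz_f == 59, __dn == 10, and
+ // __m == (~0 << 5) & (~0 >> 49) == 0x7FE0 (bits 5..14 set).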
__storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __storage_type __b = ~*__first.__seg_ & __m; + if (__b) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); + if (__n == __dn) + return __first + __n; + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) + { + __storage_type __b = ~*__first.__seg_; + if (__b) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); + } + // do last partial word + if (__n > 0) + { + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + __storage_type __b = ~*__first.__seg_ & __m; + if (__b) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); + } + return _It(__first.__seg_, static_cast(__n)); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +__bit_iterator<_Cp, _IsConst> +find(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, const _Tp& __value_) +{ + if (static_cast(__value_)) + return __find_bool_true(__first, static_cast(__last - __first)); + return __find_bool_false(__first, static_cast(__last - __first)); +} + +// count + +template +typename __bit_iterator<_Cp, _IsConst>::difference_type +__count_bool_true(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) +{ + typedef __bit_iterator<_Cp, _IsConst> _It; + typedef typename _It::__storage_type __storage_type; + typedef typename _It::difference_type difference_type; + const int __bits_per_word = _It::__bits_per_word; + difference_type __r = 0; + // do first partial word + if (__first.__ctz_ != 0) + { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __r = _CUDA_VSTD::__libcpp_popcount(*__first.__seg_ & __m); + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) + __r += _CUDA_VSTD::__libcpp_popcount(*__first.__seg_); + // do last partial word + if (__n > 0) + { + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + __r += _CUDA_VSTD::__libcpp_popcount(*__first.__seg_ & __m); + } + return __r; +} + +template +typename __bit_iterator<_Cp, _IsConst>::difference_type +__count_bool_false(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) +{ + typedef __bit_iterator<_Cp, _IsConst> _It; + typedef typename _It::__storage_type __storage_type; + typedef typename _It::difference_type difference_type; + const int __bits_per_word = _It::__bits_per_word; + difference_type __r = 0; + // do first partial word + if (__first.__ctz_ != 0) + { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __r = _CUDA_VSTD::__libcpp_popcount(~*__first.__seg_ & __m); + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) + __r += _CUDA_VSTD::__libcpp_popcount(~*__first.__seg_); + // do last partial word + if (__n > 0) + { + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + __r += 
_CUDA_VSTD::__libcpp_popcount(~*__first.__seg_ & __m); + } + return __r; +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __bit_iterator<_Cp, _IsConst>::difference_type +count(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, const _Tp& __value_) +{ + if (static_cast(__value_)) + return __count_bool_true(__first, static_cast(__last - __first)); + return __count_bool_false(__first, static_cast(__last - __first)); +} + +// fill_n + +template +void +__fill_n_false(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n) +{ + typedef __bit_iterator<_Cp, false> _It; + typedef typename _It::__storage_type __storage_type; + const int __bits_per_word = _It::__bits_per_word; + // do first partial word + if (__first.__ctz_ != 0) + { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + *__first.__seg_ &= ~__m; + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + __storage_type __nw = __n / __bits_per_word; + _CUDA_VSTD::memset(_CUDA_VSTD::__to_raw_pointer(__first.__seg_), 0, __nw * sizeof(__storage_type)); + __n -= __nw * __bits_per_word; + // do last partial word + if (__n > 0) + { + __first.__seg_ += __nw; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + *__first.__seg_ &= ~__m; + } +} + +template +void +__fill_n_true(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n) +{ + typedef __bit_iterator<_Cp, false> _It; + typedef typename _It::__storage_type __storage_type; + const int __bits_per_word = _It::__bits_per_word; + // do first partial word + if (__first.__ctz_ != 0) + { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + *__first.__seg_ |= __m; + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + __storage_type __nw = __n / __bits_per_word; + _CUDA_VSTD::memset(_CUDA_VSTD::__to_raw_pointer(__first.__seg_), -1, __nw * sizeof(__storage_type)); + __n -= __nw * __bits_per_word; + // do last partial word + if (__n > 0) + { + __first.__seg_ += __nw; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + *__first.__seg_ |= __m; + } +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +fill_n(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n, bool __value_) +{ + if (__n > 0) + { + if (__value_) + __fill_n_true(__first, __n); + else + __fill_n_false(__first, __n); + } +} + +// fill + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +fill(__bit_iterator<_Cp, false> __first, __bit_iterator<_Cp, false> __last, bool __value_) +{ + _CUDA_VSTD::fill_n(__first, static_cast(__last - __first), __value_); +} + +// copy + +template +__bit_iterator<_Cp, false> +__copy_aligned(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, + __bit_iterator<_Cp, false> __result) +{ + typedef __bit_iterator<_Cp, _IsConst> _In; + typedef typename _In::difference_type difference_type; + typedef typename _In::__storage_type __storage_type; + const int __bits_per_word = _In::__bits_per_word; + difference_type __n = __last - __first; + if (__n > 0) + { + // do first word + if (__first.__ctz_ != 0) + { + unsigned __clz = __bits_per_word - __first.__ctz_; + 
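+ // [Editor's note, not from upstream.] Aligned case: the copy() dispatcher
+ // further down only calls this overload when __first.__ctz_ ==
+ // __result.__ctz_, so every bit keeps its position within its word. The
+ // head word is merged through a mask (clear the destination bits, then OR
+ // in the source bits), the middle whole words go through a single
+ // memmove, and the tail word is masked the same way as the head.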
difference_type __dn = _CUDA_VSTD::min(static_cast<difference_type>(__clz), __n);
+            __n -= __dn;
+            __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz - __dn));
+            __storage_type __b = *__first.__seg_ & __m;
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b;
+            __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word;
+            __result.__ctz_ = static_cast<unsigned>((__dn + __result.__ctz_) % __bits_per_word);
+            ++__first.__seg_;
+            // __first.__ctz_ = 0;
+        }
+        // __first.__ctz_ == 0;
+        // do middle words
+        __storage_type __nw = __n / __bits_per_word;
+        _CUDA_VSTD::memmove(_CUDA_VSTD::__to_raw_pointer(__result.__seg_),
+                            _CUDA_VSTD::__to_raw_pointer(__first.__seg_),
+                            __nw * sizeof(__storage_type));
+        __n -= __nw * __bits_per_word;
+        __result.__seg_ += __nw;
+        // do last word
+        if (__n > 0)
+        {
+            __first.__seg_ += __nw;
+            __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n);
+            __storage_type __b = *__first.__seg_ & __m;
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b;
+            __result.__ctz_ = static_cast<unsigned>(__n);
+        }
+    }
+    return __result;
+}
+
+template <class _Cp, bool _IsConst>
+__bit_iterator<_Cp, false>
+__copy_unaligned(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last,
+                 __bit_iterator<_Cp, false> __result)
+{
+    typedef __bit_iterator<_Cp, _IsConst> _In;
+    typedef typename _In::difference_type difference_type;
+    typedef typename _In::__storage_type __storage_type;
+    static const int __bits_per_word = _In::__bits_per_word;
+    difference_type __n = __last - __first;
+    if (__n > 0)
+    {
+        // do first word
+        if (__first.__ctz_ != 0)
+        {
+            unsigned __clz_f = __bits_per_word - __first.__ctz_;
+            difference_type __dn = _CUDA_VSTD::min(static_cast<difference_type>(__clz_f), __n);
+            __n -= __dn;
+            __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn));
+            __storage_type __b = *__first.__seg_ & __m;
+            unsigned __clz_r = __bits_per_word - __result.__ctz_;
+            __storage_type __ddn = _CUDA_VSTD::min<__storage_type>(__dn, __clz_r);
+            __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __ddn));
+            *__result.__seg_ &= ~__m;
+            if (__result.__ctz_ > __first.__ctz_)
+                *__result.__seg_ |= __b << (__result.__ctz_ - __first.__ctz_);
+            else
+                *__result.__seg_ |= __b >> (__first.__ctz_ - __result.__ctz_);
+            __result.__seg_ += (__ddn + __result.__ctz_) / __bits_per_word;
+            __result.__ctz_ = static_cast<unsigned>((__ddn + __result.__ctz_) % __bits_per_word);
+            __dn -= __ddn;
+            if (__dn > 0)
+            {
+                __m = ~__storage_type(0) >> (__bits_per_word - __dn);
+                *__result.__seg_ &= ~__m;
+                *__result.__seg_ |= __b >> (__first.__ctz_ + __ddn);
+                __result.__ctz_ = static_cast<unsigned>(__dn);
+            }
+            ++__first.__seg_;
+            // __first.__ctz_ = 0;
+        }
+        // __first.__ctz_ == 0;
+        // do middle words
+        unsigned __clz_r = __bits_per_word - __result.__ctz_;
+        __storage_type __m = ~__storage_type(0) << __result.__ctz_;
+        for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_)
+        {
+            __storage_type __b = *__first.__seg_;
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b << __result.__ctz_;
+            ++__result.__seg_;
+            *__result.__seg_ &= __m;
+            *__result.__seg_ |= __b >> __clz_r;
+        }
+        // do last word
+        if (__n > 0)
+        {
+            __m = ~__storage_type(0) >> (__bits_per_word - __n);
+            __storage_type __b = *__first.__seg_ & __m;
+            __storage_type __dn = _CUDA_VSTD::min(__n, static_cast<difference_type>(__clz_r));
+            __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __dn));
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b << __result.__ctz_;
+            __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word;
+            __result.__ctz_ = static_cast<unsigned>((__dn + __result.__ctz_) % __bits_per_word);
+            __n -= __dn;
+            if (__n > 0)
+            {
+                __m = ~__storage_type(0) >> (__bits_per_word - __n);
+                *__result.__seg_ &= ~__m;
+                *__result.__seg_ |= __b >> __dn;
+                __result.__ctz_ = static_cast<unsigned>(__n);
+            }
+        }
+    }
+    return __result;
+}
+
+template <class _Cp, bool _IsConst>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+__bit_iterator<_Cp, false>
+copy(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result)
+{
+    if (__first.__ctz_ == __result.__ctz_)
+        return __copy_aligned(__first, __last, __result);
+    return __copy_unaligned(__first, __last, __result);
+}
+
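[Editorial aside, not part of the vendored header: the aligned copy above runs in three phases — finish the first partial word under a mask, memmove whole words, then mask in the low bits of the last word. A minimal standalone sketch of that structure on plain 64-bit words; copy_bits_aligned is a hypothetical helper, and it assumes source and destination share the same bit offset, which is exactly the precondition the aligned/unaligned dispatch checks.]

#include <cstdint>
#include <cstring>
#include <cstddef>

// Sketch: copy n bits from src to dst, both starting at bit offset ctz.
void copy_bits_aligned(const std::uint64_t* src, std::uint64_t* dst,
                       unsigned ctz, std::size_t n)
{
    if (ctz != 0 && n > 0) {                    // phase 1: partial first word
        std::size_t dn = 64 - ctz < n ? 64 - ctz : n;
        std::uint64_t m = (~std::uint64_t(0) << ctz) &
                          (~std::uint64_t(0) >> (64 - ctz - dn));
        *dst = (*dst & ~m) | (*src & m);        // read-modify-write under mask
        ++src; ++dst; n -= dn;
    }
    std::size_t nw = n / 64;                    // phase 2: whole words at once
    std::memmove(dst, src, nw * sizeof(std::uint64_t));
    src += nw; dst += nw; n -= nw * 64;
    if (n > 0) {                                // phase 3: low bits of last word
        std::uint64_t m = ~std::uint64_t(0) >> (64 - n);
        *dst = (*dst & ~m) | (*src & m);
    }
}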
+// copy_backward
+
+template <class _Cp, bool _IsConst>
+__bit_iterator<_Cp, false>
+__copy_backward_aligned(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last,
+                        __bit_iterator<_Cp, false> __result)
+{
+    typedef __bit_iterator<_Cp, _IsConst> _In;
+    typedef typename _In::difference_type difference_type;
+    typedef typename _In::__storage_type __storage_type;
+    const int __bits_per_word = _In::__bits_per_word;
+    difference_type __n = __last - __first;
+    if (__n > 0)
+    {
+        // do first word
+        if (__last.__ctz_ != 0)
+        {
+            difference_type __dn = _CUDA_VSTD::min(static_cast<difference_type>(__last.__ctz_), __n);
+            __n -= __dn;
+            unsigned __clz = __bits_per_word - __last.__ctz_;
+            __storage_type __m = (~__storage_type(0) << (__last.__ctz_ - __dn)) & (~__storage_type(0) >> __clz);
+            __storage_type __b = *__last.__seg_ & __m;
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b;
+            __result.__ctz_ = static_cast<unsigned>(((-__dn & (__bits_per_word - 1)) +
+                                                     __result.__ctz_) % __bits_per_word);
+            // __last.__ctz_ = 0
+        }
+        // __last.__ctz_ == 0 || __n == 0
+        // __result.__ctz_ == 0 || __n == 0
+        // do middle words
+        __storage_type __nw = __n / __bits_per_word;
+        __result.__seg_ -= __nw;
+        __last.__seg_ -= __nw;
+        _CUDA_VSTD::memmove(_CUDA_VSTD::__to_raw_pointer(__result.__seg_),
+                            _CUDA_VSTD::__to_raw_pointer(__last.__seg_),
+                            __nw * sizeof(__storage_type));
+        __n -= __nw * __bits_per_word;
+        // do last word
+        if (__n > 0)
+        {
+            __storage_type __m = ~__storage_type(0) << (__bits_per_word - __n);
+            __storage_type __b = *--__last.__seg_ & __m;
+            *--__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b;
+            __result.__ctz_ = static_cast<unsigned>(-__n & (__bits_per_word - 1));
+        }
+    }
+    return __result;
+}
+
+template <class _Cp, bool _IsConst>
+__bit_iterator<_Cp, false>
+__copy_backward_unaligned(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last,
+                          __bit_iterator<_Cp, false> __result)
+{
+    typedef __bit_iterator<_Cp, _IsConst> _In;
+    typedef typename _In::difference_type difference_type;
+    typedef typename _In::__storage_type __storage_type;
+    const int __bits_per_word = _In::__bits_per_word;
+    difference_type __n = __last - __first;
+    if (__n > 0)
+    {
+        // do first word
+        if (__last.__ctz_ != 0)
+        {
+            difference_type __dn = _CUDA_VSTD::min(static_cast<difference_type>(__last.__ctz_), __n);
+            __n -= __dn;
+            unsigned __clz_l = __bits_per_word - __last.__ctz_;
+            __storage_type __m = (~__storage_type(0) << (__last.__ctz_ - __dn)) & (~__storage_type(0) >> __clz_l);
+            __storage_type __b = *__last.__seg_ & __m;
+            unsigned __clz_r = __bits_per_word - __result.__ctz_;
+            __storage_type __ddn = _CUDA_VSTD::min(__dn, static_cast<difference_type>(__result.__ctz_));
+            if (__ddn > 0)
+            {
+                __m = (~__storage_type(0) << (__result.__ctz_ - __ddn)) & (~__storage_type(0) >> __clz_r);
+                *__result.__seg_ &= ~__m;
+                if (__result.__ctz_ > __last.__ctz_)
+                    *__result.__seg_ |= __b << (__result.__ctz_ - __last.__ctz_);
+                else
+                    *__result.__seg_ |= __b >> (__last.__ctz_ - __result.__ctz_);
+                __result.__ctz_ = static_cast<unsigned>(((-__ddn & (__bits_per_word - 1)) +
+                                                         __result.__ctz_) % __bits_per_word);
+                __dn -= __ddn;
+            }
+            if (__dn > 0)
+            {
+                // __result.__ctz_ == 0
+                --__result.__seg_;
+                __result.__ctz_ = static_cast<unsigned>(-__dn & (__bits_per_word - 1));
+                __m = ~__storage_type(0) << __result.__ctz_;
+                *__result.__seg_ &= ~__m;
+                __last.__ctz_ -= __dn + __ddn;
+                *__result.__seg_ |= __b << (__result.__ctz_ - __last.__ctz_);
+            }
+            // __last.__ctz_ = 0
+        }
+        // __last.__ctz_ == 0 || __n == 0
+        // __result.__ctz_ != 0 || __n == 0
+        // do middle words
+        unsigned __clz_r = __bits_per_word - __result.__ctz_;
+        __storage_type __m = ~__storage_type(0) >> __clz_r;
+        for (; __n >= __bits_per_word; __n -= __bits_per_word)
+        {
+            __storage_type __b = *--__last.__seg_;
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b >> __clz_r;
+            *--__result.__seg_ &= __m;
+            *__result.__seg_ |= __b << __result.__ctz_;
+        }
+        // do last word
+        if (__n > 0)
+        {
+            __m = ~__storage_type(0) << (__bits_per_word - __n);
+            __storage_type __b = *--__last.__seg_ & __m;
+            __clz_r = __bits_per_word - __result.__ctz_;
+            __storage_type __dn = _CUDA_VSTD::min(__n, static_cast<difference_type>(__result.__ctz_));
+            __m = (~__storage_type(0) << (__result.__ctz_ - __dn)) & (~__storage_type(0) >> __clz_r);
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b >> (__bits_per_word - __result.__ctz_);
+            __result.__ctz_ = static_cast<unsigned>(((-__dn & (__bits_per_word - 1)) +
+                                                     __result.__ctz_) % __bits_per_word);
+            __n -= __dn;
+            if (__n > 0)
+            {
+                // __result.__ctz_ == 0
+                --__result.__seg_;
+                __result.__ctz_ = static_cast<unsigned>(-__n & (__bits_per_word - 1));
+                __m = ~__storage_type(0) << __result.__ctz_;
+                *__result.__seg_ &= ~__m;
+                *__result.__seg_ |= __b << (__result.__ctz_ - (__bits_per_word - __n - __dn));
+            }
+        }
+    }
+    return __result;
+}
+
+template <class _Cp, bool _IsConst>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+__bit_iterator<_Cp, false>
+copy_backward(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result)
+{
+    if (__last.__ctz_ == __result.__ctz_)
+        return __copy_backward_aligned(__first, __last, __result);
+    return __copy_backward_unaligned(__first, __last, __result);
+}
+
+// move
+
+template <class _Cp, bool _IsConst>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+__bit_iterator<_Cp, false>
+move(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result)
+{
+    return _CUDA_VSTD::copy(__first, __last, __result);
+}
+
+// move_backward
+
+template <class _Cp, bool _IsConst>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+__bit_iterator<_Cp, false>
+move_backward(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result)
+{
+    return _CUDA_VSTD::copy_backward(__first, __last, __result);
+}
+
+// swap_ranges
+
+template <class __C1, class __C2>
+__bit_iterator<__C2, false>
+__swap_ranges_aligned(__bit_iterator<__C1, false> __first, __bit_iterator<__C1, false> __last,
+                      __bit_iterator<__C2, false> __result)
+{
+    typedef __bit_iterator<__C1, false> _I1;
+    typedef typename _I1::difference_type difference_type;
+    typedef typename _I1::__storage_type __storage_type;
+    const int __bits_per_word = _I1::__bits_per_word;
+    difference_type __n = __last - __first;
+    if (__n > 0)
+    {
+        // do first word
+        if (__first.__ctz_ != 0)
+        {
+            unsigned __clz = __bits_per_word - __first.__ctz_;
+            difference_type __dn = _CUDA_VSTD::min(static_cast<difference_type>(__clz), __n);
+            __n -= __dn;
+            __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz - __dn));
+            __storage_type __b1 = *__first.__seg_ & __m;
+            *__first.__seg_ &= ~__m;
+            __storage_type __b2 = *__result.__seg_ & __m;
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b1;
+            *__first.__seg_ |= __b2;
+            __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word;
+            __result.__ctz_ = static_cast<unsigned>((__dn + __result.__ctz_) % __bits_per_word);
+            ++__first.__seg_;
+            // __first.__ctz_ = 0;
+        }
+        // __first.__ctz_ == 0;
+        // do middle words
+        for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_, ++__result.__seg_)
+            swap(*__first.__seg_, *__result.__seg_);
+        // do last word
+        if (__n > 0)
+        {
+            __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n);
+            __storage_type __b1 = *__first.__seg_ & __m;
+            *__first.__seg_ &= ~__m;
+            __storage_type __b2 = *__result.__seg_ & __m;
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b1;
+            *__first.__seg_ |= __b2;
+            __result.__ctz_ = static_cast<unsigned>(__n);
+        }
+    }
+    return __result;
+}
+
+template <class __C1, class __C2>
+__bit_iterator<__C2, false>
+__swap_ranges_unaligned(__bit_iterator<__C1, false> __first, __bit_iterator<__C1, false> __last,
+                        __bit_iterator<__C2, false> __result)
+{
+    typedef __bit_iterator<__C1, false> _I1;
+    typedef typename _I1::difference_type difference_type;
+    typedef typename _I1::__storage_type __storage_type;
+    const int __bits_per_word = _I1::__bits_per_word;
+    difference_type __n = __last - __first;
+    if (__n > 0)
+    {
+        // do first word
+        if (__first.__ctz_ != 0)
+        {
+            unsigned __clz_f = __bits_per_word - __first.__ctz_;
+            difference_type __dn = _CUDA_VSTD::min(static_cast<difference_type>(__clz_f), __n);
+            __n -= __dn;
+            __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn));
+            __storage_type __b1 = *__first.__seg_ & __m;
+            *__first.__seg_ &= ~__m;
+            unsigned __clz_r = __bits_per_word - __result.__ctz_;
+            __storage_type __ddn = _CUDA_VSTD::min<__storage_type>(__dn, __clz_r);
+            __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __ddn));
+            __storage_type __b2 = *__result.__seg_ & __m;
+            *__result.__seg_ &= ~__m;
+            if (__result.__ctz_ > __first.__ctz_)
+            {
+                unsigned __s = __result.__ctz_ - __first.__ctz_;
+                *__result.__seg_ |= __b1 << __s;
+                *__first.__seg_ |= __b2 >> __s;
+            }
+            else
+            {
+                unsigned __s = __first.__ctz_ - __result.__ctz_;
+                *__result.__seg_ |= __b1 >> __s;
+                *__first.__seg_ |= __b2 << __s;
+            }
+            __result.__seg_ += (__ddn + __result.__ctz_) / __bits_per_word;
+            __result.__ctz_ = static_cast<unsigned>((__ddn + __result.__ctz_) % __bits_per_word);
+            __dn -= __ddn;
+            if (__dn > 0)
+            {
+                __m = ~__storage_type(0) >> (__bits_per_word - __dn);
+                __b2 = *__result.__seg_ & __m;
+                *__result.__seg_ &= ~__m;
+                unsigned __s = __first.__ctz_ + __ddn;
+                *__result.__seg_ |= __b1 >> __s;
+                *__first.__seg_ |= __b2 << __s;
+                __result.__ctz_ = static_cast<unsigned>(__dn);
+            }
+            ++__first.__seg_;
+            // __first.__ctz_ = 0;
+        }
+        // __first.__ctz_ == 0;
+        // do middle words
+        __storage_type __m = ~__storage_type(0) << __result.__ctz_;
+        unsigned __clz_r = __bits_per_word - __result.__ctz_;
+        for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_)
+        {
+            __storage_type __b1 = *__first.__seg_;
+            __storage_type __b2 = *__result.__seg_ & __m;
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b1 << __result.__ctz_;
+            *__first.__seg_ = __b2 >> __result.__ctz_;
+            ++__result.__seg_;
+            __b2 = *__result.__seg_ & ~__m;
+            *__result.__seg_ &= __m;
+            *__result.__seg_ |= __b1 >> __clz_r;
+            *__first.__seg_ |= __b2 << __clz_r;
+        }
+        // do last word
+        if (__n > 0)
+        {
+            __m = ~__storage_type(0) >> (__bits_per_word - __n);
+            __storage_type __b1 = *__first.__seg_ & __m;
+            *__first.__seg_ &= ~__m;
+            __storage_type __dn = _CUDA_VSTD::min<__storage_type>(__n, __clz_r);
+            __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __dn));
+            __storage_type __b2 = *__result.__seg_ & __m;
+            *__result.__seg_ &= ~__m;
+            *__result.__seg_ |= __b1 << __result.__ctz_;
+            *__first.__seg_ |= __b2 >> __result.__ctz_;
+            __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word;
+            __result.__ctz_ = static_cast<unsigned>((__dn + __result.__ctz_) % __bits_per_word);
+            __n -= __dn;
+            if (__n > 0)
+            {
+                __m = ~__storage_type(0) >> (__bits_per_word - __n);
+                __b2 = *__result.__seg_ & __m;
+                *__result.__seg_ &= ~__m;
+                *__result.__seg_ |= __b1 >> __dn;
+                *__first.__seg_ |= __b2 << __dn;
+                __result.__ctz_ = static_cast<unsigned>(__n);
+            }
+        }
+    }
+    return __result;
+}
+
+template <class __C1, class __C2>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+__bit_iterator<__C2, false>
+swap_ranges(__bit_iterator<__C1, false> __first1, __bit_iterator<__C1, false> __last1,
+            __bit_iterator<__C2, false> __first2)
+{
+    if (__first1.__ctz_ == __first2.__ctz_)
+        return __swap_ranges_aligned(__first1, __last1, __first2);
+    return __swap_ranges_unaligned(__first1, __last1, __first2);
+}
+
+// rotate
+
+template <class _Cp>
+struct __bit_array
+{
+    typedef typename _Cp::difference_type difference_type;
+    typedef typename _Cp::__storage_type __storage_type;
+    typedef typename _Cp::__storage_pointer __storage_pointer;
+    typedef typename _Cp::iterator iterator;
+    static const unsigned __bits_per_word = _Cp::__bits_per_word;
+    static const unsigned _Np = 4;
+
+    difference_type __size_;
+    __storage_type __word_[_Np];
+
+    _LIBCUDACXX_INLINE_VISIBILITY static difference_type capacity()
+        {return static_cast<difference_type>(_Np * __bits_per_word);}
+    _LIBCUDACXX_INLINE_VISIBILITY explicit __bit_array(difference_type __s) : __size_(__s) {}
+    _LIBCUDACXX_INLINE_VISIBILITY iterator begin()
+    {
+        return iterator(pointer_traits<__storage_pointer>::pointer_to(__word_[0]), 0);
+    }
+    _LIBCUDACXX_INLINE_VISIBILITY iterator end()
+    {
+        return iterator(pointer_traits<__storage_pointer>::pointer_to(__word_[0]) + __size_ / __bits_per_word,
+                        static_cast<unsigned>(__size_ % __bits_per_word));
+    }
+};
+
+template <class _Cp>
+__bit_iterator<_Cp, false>
+rotate(__bit_iterator<_Cp, false> __first, __bit_iterator<_Cp, false> __middle, __bit_iterator<_Cp, false> __last)
+{
+    typedef __bit_iterator<_Cp, false> _I1;
+    typedef typename _I1::difference_type difference_type;
+    difference_type __d1 = __middle - __first;
+    difference_type __d2 = __last - __middle;
+    _I1 __r = __first + __d2;
+    while (__d1 != 0 && __d2 != 0)
+    {
+        if (__d1 <= __d2)
+        {
+            if (__d1 <= __bit_array<_Cp>::capacity())
+            {
+                __bit_array<_Cp> __b(__d1);
+                _CUDA_VSTD::copy(__first, __middle, __b.begin());
+                _CUDA_VSTD::copy(__b.begin(), __b.end(), _CUDA_VSTD::copy(__middle, __last, __first));
+                break;
+            }
+            else
+            {
+                __bit_iterator<_Cp, false> __mp = _CUDA_VSTD::swap_ranges(__first, __middle, __middle);
+                __first = __middle;
+                __middle = __mp;
+                __d2 -= __d1;
+            }
+        }
+        else
+        {
+            if (__d2 <= __bit_array<_Cp>::capacity())
+            {
+                __bit_array<_Cp> __b(__d2);
+                _CUDA_VSTD::copy(__middle, __last, __b.begin());
+                _CUDA_VSTD::copy_backward(__b.begin(), __b.end(), _CUDA_VSTD::copy_backward(__first, __middle, __last));
+                break;
+            }
+            else
+            {
+                __bit_iterator<_Cp, false> __mp = __first + __d2;
+                _CUDA_VSTD::swap_ranges(__first, __mp, __middle);
+                __first = __mp;
+                __d1 -= __d2;
+            }
+        }
+    }
+    return __r;
+}
+
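[Editorial aside, not part of the vendored header: rotate() above repeatedly swaps the shorter block into place and finishes through a small fixed-size scratch buffer once a side fits (__bit_array holds 4 words). A standalone sketch of the same block-swap strategy on a plain array; rotate_blockwise is a hypothetical name.]

#include <algorithm>
#include <cstddef>

// Sketch: rotate [first, last) so that *middle lands at *first.
// Returns the new position of the old first element.
template <class T>
T* rotate_blockwise(T* first, T* middle, T* last)
{
    const std::ptrdiff_t cap = 4;          // cf. __bit_array: 4 words of scratch
    std::ptrdiff_t d1 = middle - first;    // length of the left block
    std::ptrdiff_t d2 = last - middle;     // length of the right block
    T* ret = first + d2;
    while (d1 != 0 && d2 != 0) {
        if (d1 <= d2) {
            if (d1 <= cap) {               // left block fits: buffer, shift, append
                T buf[cap];
                std::copy(first, middle, buf);
                std::copy(buf, buf + d1, std::copy(middle, last, first));
                break;
            }
            T* mp = std::swap_ranges(first, middle, middle);
            first = middle; middle = mp; d2 -= d1;
        } else {
            if (d2 <= cap) {               // right block fits: mirror image, backward
                T buf[cap];
                std::copy(middle, last, buf);
                std::copy_backward(buf, buf + d2, std::copy_backward(first, middle, last));
                break;
            }
            T* mp = first + d2;
            std::swap_ranges(first, mp, middle);
            first = mp; d1 -= d2;
        }
    }
    return ret;                            // e.g. rotate_blockwise(a, a + 3, a + 10)
}                                          // leaves the old a[3] at a[0]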
+// equal
+
+template <class _Cp, bool _IC1, bool _IC2>
+bool
+__equal_unaligned(__bit_iterator<_Cp, _IC1> __first1, __bit_iterator<_Cp, _IC1> __last1,
+                  __bit_iterator<_Cp, _IC2> __first2)
+{
+    typedef __bit_iterator<_Cp, _IC1> _It;
+    typedef typename _It::difference_type difference_type;
+    typedef typename _It::__storage_type __storage_type;
+    static const int __bits_per_word = _It::__bits_per_word;
+    difference_type __n = __last1 - __first1;
+    if (__n > 0)
+    {
+        // do first word
+        if (__first1.__ctz_ != 0)
+        {
+            unsigned __clz_f = __bits_per_word - __first1.__ctz_;
+            difference_type __dn = _CUDA_VSTD::min(static_cast<difference_type>(__clz_f), __n);
+            __n -= __dn;
+            __storage_type __m = (~__storage_type(0) << __first1.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn));
+            __storage_type __b = *__first1.__seg_ & __m;
+            unsigned __clz_r = __bits_per_word - __first2.__ctz_;
+            __storage_type __ddn = _CUDA_VSTD::min<__storage_type>(__dn, __clz_r);
+            __m = (~__storage_type(0) << __first2.__ctz_) & (~__storage_type(0) >> (__clz_r - __ddn));
+            if (__first2.__ctz_ > __first1.__ctz_)
+            {
+                if ((*__first2.__seg_ & __m) != (__b << (__first2.__ctz_ - __first1.__ctz_)))
+                    return false;
+            }
+            else
+            {
+                if ((*__first2.__seg_ & __m) != (__b >> (__first1.__ctz_ - __first2.__ctz_)))
+                    return false;
+            }
+            __first2.__seg_ += (__ddn + __first2.__ctz_) / __bits_per_word;
+            __first2.__ctz_ = static_cast<unsigned>((__ddn + __first2.__ctz_) % __bits_per_word);
+            __dn -= __ddn;
+            if (__dn > 0)
+            {
+                __m = ~__storage_type(0) >> (__bits_per_word - __dn);
+                if ((*__first2.__seg_ & __m) != (__b >> (__first1.__ctz_ + __ddn)))
+                    return false;
+                __first2.__ctz_ = static_cast<unsigned>(__dn);
+            }
+            ++__first1.__seg_;
+            // __first1.__ctz_ = 0;
+        }
+        // __first1.__ctz_ == 0;
+        // do middle words
+        unsigned __clz_r = __bits_per_word - __first2.__ctz_;
+        __storage_type __m = ~__storage_type(0) << __first2.__ctz_;
+        for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first1.__seg_)
+        {
+            __storage_type __b = *__first1.__seg_;
+            if ((*__first2.__seg_ & __m) != (__b << __first2.__ctz_))
+                return false;
+            ++__first2.__seg_;
+            if ((*__first2.__seg_ & ~__m) != (__b >> __clz_r))
+                return false;
+        }
+        // do last word
+        if (__n > 0)
+        {
+            __m = ~__storage_type(0) >> (__bits_per_word - __n);
+            __storage_type __b = *__first1.__seg_ & __m;
+            __storage_type __dn = _CUDA_VSTD::min(__n, static_cast<difference_type>(__clz_r));
+            __m = (~__storage_type(0) << __first2.__ctz_) & (~__storage_type(0) >> (__clz_r - __dn));
+            if ((*__first2.__seg_ & __m) != (__b << __first2.__ctz_))
+                return false;
+            __first2.__seg_ += (__dn + __first2.__ctz_) / __bits_per_word;
+            __first2.__ctz_ = static_cast<unsigned>((__dn + __first2.__ctz_) % __bits_per_word);
+            __n -= __dn;
+            if (__n > 0)
+            {
+                __m = ~__storage_type(0) >> (__bits_per_word - __n);
+                if ((*__first2.__seg_ & __m) != (__b >> __dn))
+                    return false;
+            }
+        }
+    }
+    return true;
+}
+
+template <class _Cp, bool _IC1, bool _IC2>
+bool
+__equal_aligned(__bit_iterator<_Cp, _IC1> __first1, __bit_iterator<_Cp, _IC1> __last1,
+                __bit_iterator<_Cp, _IC2> __first2)
+{
+    typedef __bit_iterator<_Cp, _IC1> _It;
+    typedef typename _It::difference_type difference_type;
+    typedef typename _It::__storage_type __storage_type;
+    static const int __bits_per_word = _It::__bits_per_word;
+    difference_type __n = __last1 - __first1;
+    if (__n > 0)
+    {
+        // do first word
+        if (__first1.__ctz_ != 0)
+        {
+            unsigned __clz = __bits_per_word - __first1.__ctz_;
+            difference_type __dn = _CUDA_VSTD::min(static_cast<difference_type>(__clz), __n);
+            __n -= __dn;
+            __storage_type __m = (~__storage_type(0) << __first1.__ctz_) & (~__storage_type(0) >> (__clz - __dn));
+            if ((*__first2.__seg_ & __m) != (*__first1.__seg_ & __m))
+                return false;
+            ++__first2.__seg_;
+            ++__first1.__seg_;
+            // __first1.__ctz_ = 0;
+            // __first2.__ctz_ = 0;
+        }
+        // __first1.__ctz_ == 0;
+        // __first2.__ctz_ == 0;
+        // do middle words
+        for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first1.__seg_, ++__first2.__seg_)
+            if (*__first2.__seg_ != *__first1.__seg_)
+                return false;
+        // do last word
+        if (__n > 0)
+        {
+            __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n);
+            if ((*__first2.__seg_ & __m) != (*__first1.__seg_ & __m))
+                return false;
+        }
+    }
+    return true;
+}
+
+template <class _Cp, bool _IC1, bool _IC2>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+bool
+equal(__bit_iterator<_Cp, _IC1> __first1, __bit_iterator<_Cp, _IC1> __last1, __bit_iterator<_Cp, _IC2> __first2)
+{
+    if (__first1.__ctz_ == __first2.__ctz_)
+        return __equal_aligned(__first1, __last1, __first2);
+    return __equal_unaligned(__first1, __last1, __first2);
+}
+
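[Editorial aside, not part of the vendored header: all of the routines above, and the __bit_iterator class that follows, work on a (word pointer, bit offset) pair. A minimal sketch of that representation with the random-access arithmetic reduced to plain types; bit_it is a hypothetical name, and the floor-division here is written differently from, but is equivalent to, the operator+= below.]

#include <cstdint>
#include <cstddef>

struct bit_it {
    std::uint64_t* seg;   // current word
    unsigned       ctz;   // bit offset within *seg, 0..63

    bit_it& operator+=(std::ptrdiff_t n) {
        std::ptrdiff_t pos = (std::ptrdiff_t)ctz + n;   // offset from start of *seg
        // floor-divide pos by 64 so negative n also lands on the right word
        std::ptrdiff_t w = pos >= 0 ? pos / 64 : -((-pos + 63) / 64);
        seg += w;
        ctz = (unsigned)(pos - w * 64);                 // remainder in [0, 64)
        return *this;
    }
};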
+template <class _Cp, bool _IsConst>
+class __bit_iterator
+{
+public:
+    typedef typename _Cp::difference_type difference_type;
+    typedef bool value_type;
+    typedef __bit_iterator pointer;
+    typedef typename conditional<_IsConst, __bit_const_reference<_Cp>, __bit_reference<_Cp> >::type reference;
+    typedef random_access_iterator_tag iterator_category;
+
+private:
+    typedef typename _Cp::__storage_type __storage_type;
+    typedef typename conditional<_IsConst, typename _Cp::__const_storage_pointer,
+                                 typename _Cp::__storage_pointer>::type __storage_pointer;
+    static const unsigned __bits_per_word = _Cp::__bits_per_word;
+
+    __storage_pointer __seg_;
+    unsigned __ctz_;
+
+public:
+    _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator() _NOEXCEPT
+#if _LIBCUDACXX_STD_VER > 11
+    : __seg_(nullptr), __ctz_(0)
+#endif
+    {}
+    // avoid re-declaring a copy constructor for the non-const version.
+    using __type_for_copy_to_const =
+        _If<_IsConst, __bit_iterator<_Cp, false>, struct __private_nat>;
+
+    _LIBCUDACXX_INLINE_VISIBILITY
+    __bit_iterator(const __type_for_copy_to_const& __it) _NOEXCEPT
+        : __seg_(__it.__seg_), __ctz_(__it.__ctz_) {}
+
+    _LIBCUDACXX_INLINE_VISIBILITY reference operator*() const _NOEXCEPT
+        {return reference(__seg_, __storage_type(1) << __ctz_);}
+
+    _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator++()
+    {
+        if (__ctz_ != __bits_per_word-1)
+            ++__ctz_;
+        else
+        {
+            __ctz_ = 0;
+            ++__seg_;
+        }
+        return *this;
+    }
+
+    _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator++(int)
+    {
+        __bit_iterator __tmp = *this;
+        ++(*this);
+        return __tmp;
+    }
+
+    _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator--()
+    {
+        if (__ctz_ != 0)
+            --__ctz_;
+        else
+        {
+            __ctz_ = __bits_per_word - 1;
+            --__seg_;
+        }
+        return *this;
+    }
+
+    _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator--(int)
+    {
+        __bit_iterator __tmp = *this;
+        --(*this);
+        return __tmp;
+    }
+
+    _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator+=(difference_type __n)
+    {
+        if (__n >= 0)
+            __seg_ += (__n + __ctz_) / __bits_per_word;
+        else
+            __seg_ += static_cast<difference_type>(__n - __bits_per_word + __ctz_ + 1)
+                    / static_cast<difference_type>(__bits_per_word);
+        __n &= (__bits_per_word - 1);
+        __ctz_ = static_cast<unsigned>((__n + __ctz_) % __bits_per_word);
+        return *this;
+    }
+
+    _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator-=(difference_type __n)
+    {
+        return *this += -__n;
+    }
+
+    _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator+(difference_type __n) const
+    {
+        __bit_iterator __t(*this);
+        __t += __n;
+        return __t;
+    }
+
+    _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator-(difference_type __n) const
+    {
+        __bit_iterator __t(*this);
+        __t -= __n;
+        return __t;
+    }
+
+    _LIBCUDACXX_INLINE_VISIBILITY
+    friend __bit_iterator operator+(difference_type __n, const __bit_iterator& __it) {return __it + __n;}
+
+    _LIBCUDACXX_INLINE_VISIBILITY
+    friend difference_type operator-(const __bit_iterator& __x, const __bit_iterator& __y)
+        {return (__x.__seg_ - __y.__seg_) * __bits_per_word + __x.__ctz_ - __y.__ctz_;}
+
+    _LIBCUDACXX_INLINE_VISIBILITY reference operator[](difference_type __n) const {return *(*this + __n);}
+
+    _LIBCUDACXX_INLINE_VISIBILITY friend bool operator==(const __bit_iterator& __x, const __bit_iterator& __y)
+        {return __x.__seg_ == __y.__seg_ && __x.__ctz_ == __y.__ctz_;}
+
+    _LIBCUDACXX_INLINE_VISIBILITY friend bool operator!=(const __bit_iterator& __x, const __bit_iterator& __y)
+        {return !(__x == __y);}
+
+    _LIBCUDACXX_INLINE_VISIBILITY friend bool operator<(const __bit_iterator& __x, const __bit_iterator& __y)
+        {return __x.__seg_ < __y.__seg_ || (__x.__seg_ == __y.__seg_ && __x.__ctz_ < __y.__ctz_);}
+
+    _LIBCUDACXX_INLINE_VISIBILITY friend bool operator>(const __bit_iterator& __x, const __bit_iterator& __y)
+        {return __y < __x;}
+
+    _LIBCUDACXX_INLINE_VISIBILITY friend bool operator<=(const __bit_iterator& __x, const __bit_iterator& __y)
+        {return !(__y < __x);}
+
+    _LIBCUDACXX_INLINE_VISIBILITY friend bool operator>=(const __bit_iterator& __x, const __bit_iterator& __y)
+        {return !(__x < __y);}
+
+private:
+    _LIBCUDACXX_INLINE_VISIBILITY
+    __bit_iterator(__storage_pointer __s, unsigned __ctz) _NOEXCEPT
+        : __seg_(__s), __ctz_(__ctz) {}
+
+    friend typename _Cp::__self;
+
+    friend class __bit_reference<_Cp>;
+    friend class __bit_const_reference<_Cp>;
+    friend class __bit_iterator<_Cp, true>;
+    template <class _Dp> friend struct __bit_array;
+    template <class _Dp> friend void __fill_n_false(__bit_iterator<_Dp, false> __first, typename _Dp::size_type __n);
+    template <class _Dp> friend void __fill_n_true(__bit_iterator<_Dp, false> __first, typename _Dp::size_type __n);
+    template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> __copy_aligned(__bit_iterator<_Dp, _IC> __first,
+                                                                                    __bit_iterator<_Dp, _IC> __last,
+                                                                                    __bit_iterator<_Dp, false> __result);
+    template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> __copy_unaligned(__bit_iterator<_Dp, _IC> __first,
+                                                                                      __bit_iterator<_Dp, _IC> __last,
+                                                                                      __bit_iterator<_Dp, false> __result);
+    template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> copy(__bit_iterator<_Dp, _IC> __first,
+                                                                          __bit_iterator<_Dp, _IC> __last,
+                                                                          __bit_iterator<_Dp, false> __result);
+    template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> __copy_backward_aligned(__bit_iterator<_Dp, _IC> __first,
+                                                                                             __bit_iterator<_Dp, _IC> __last,
+                                                                                             __bit_iterator<_Dp, false> __result);
+    template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> __copy_backward_unaligned(__bit_iterator<_Dp, _IC> __first,
+                                                                                               __bit_iterator<_Dp, _IC> __last,
+                                                                                               __bit_iterator<_Dp, false> __result);
+    template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> copy_backward(__bit_iterator<_Dp, _IC> __first,
+                                                                                   __bit_iterator<_Dp, _IC> __last,
+                                                                                   __bit_iterator<_Dp, false> __result);
+    template <class __C1, class __C2> friend __bit_iterator<__C2, false> __swap_ranges_aligned(__bit_iterator<__C1, false>,
+                                                                                               __bit_iterator<__C1, false>,
+                                                                                               __bit_iterator<__C2, false>);
+    template <class __C1, class __C2> friend __bit_iterator<__C2, false> __swap_ranges_unaligned(__bit_iterator<__C1, false>,
+                                                                                                 __bit_iterator<__C1, false>,
+                                                                                                 __bit_iterator<__C2, false>);
+    template <class __C1, class __C2> friend __bit_iterator<__C2, false> swap_ranges(__bit_iterator<__C1, false>,
+                                                                                     __bit_iterator<__C1, false>,
+                                                                                     __bit_iterator<__C2, false>);
+    template <class _Dp> friend __bit_iterator<_Dp, false> rotate(__bit_iterator<_Dp, false>,
+                                                                  __bit_iterator<_Dp, false>,
+                                                                  __bit_iterator<_Dp, false>);
+    template <class _Dp, bool _IC1, bool _IC2> friend bool __equal_aligned(__bit_iterator<_Dp, _IC1>,
+                                                                           __bit_iterator<_Dp, _IC1>,
+                                                                           __bit_iterator<_Dp, _IC2>);
+    template <class _Dp, bool _IC1, bool _IC2> friend bool __equal_unaligned(__bit_iterator<_Dp, _IC1>,
+                                                                             __bit_iterator<_Dp, _IC1>,
+                                                                             __bit_iterator<_Dp, _IC2>);
+    template <class _Dp, bool _IC1, bool _IC2> friend bool equal(__bit_iterator<_Dp, _IC1>,
+                                                                 __bit_iterator<_Dp, _IC1>,
+                                                                 __bit_iterator<_Dp, _IC2>);
+    template <class _Dp, bool _IC> friend __bit_iterator<_Dp, _IC> __find_bool_true(__bit_iterator<_Dp, _IC>,
+                                                                                    typename _Dp::size_type);
+    template <class _Dp, bool _IC> friend __bit_iterator<_Dp, _IC> __find_bool_false(__bit_iterator<_Dp, _IC>,
+                                                                                     typename _Dp::size_type);
+    template <class _Dp, bool _IC> friend typename __bit_iterator<_Dp, _IC>::difference_type
+                                   __count_bool_true(__bit_iterator<_Dp, _IC>, typename _Dp::size_type);
+    template <class _Dp, bool _IC> friend typename __bit_iterator<_Dp, _IC>::difference_type
+                                   __count_bool_false(__bit_iterator<_Dp, _IC>, typename _Dp::size_type);
+};
+
+_LIBCUDACXX_END_NAMESPACE_STD
+
+_LIBCUDACXX_POP_MACROS
+
+#endif // _LIBCUDACXX___BIT_REFERENCE
diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bsd_locale_defaults.h b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bsd_locale_defaults.h
new file mode 100644
index 000000000000..609865abd163
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bsd_locale_defaults.h
@@ -0,0 +1,36 @@
+// -*- C++ -*-
+//===---------------------- __bsd_locale_defaults.h -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// The BSDs have lots of *_l functions. We don't want to define those symbols +// on other platforms though, for fear of conflicts with user code. So here, +// we will define the mapping from an internal macro to the real BSD symbol. +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX_BSD_LOCALE_DEFAULTS_H +#define _LIBCUDACXX_BSD_LOCALE_DEFAULTS_H + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +#define __libcpp_mb_cur_max_l(loc) MB_CUR_MAX_L(loc) +#define __libcpp_btowc_l(ch, loc) btowc_l(ch, loc) +#define __libcpp_wctob_l(wch, loc) wctob_l(wch, loc) +#define __libcpp_wcsnrtombs_l(dst, src, nwc, len, ps, loc) wcsnrtombs_l(dst, src, nwc, len, ps, loc) +#define __libcpp_wcrtomb_l(src, wc, ps, loc) wcrtomb_l(src, wc, ps, loc) +#define __libcpp_mbsnrtowcs_l(dst, src, nms, len, ps, loc) mbsnrtowcs_l(dst, src, nms, len, ps, loc) +#define __libcpp_mbrtowc_l(pwc, s, n, ps, l) mbrtowc_l(pwc, s, n, ps, l) +#define __libcpp_mbtowc_l(pwc, pmb, max, l) mbtowc_l(pwc, pmb, max, l) +#define __libcpp_mbrlen_l(s, n, ps, l) mbrlen_l(s, n, ps, l) +#define __libcpp_localeconv_l(l) localeconv_l(l) +#define __libcpp_mbsrtowcs_l(dest, src, len, ps, l) mbsrtowcs_l(dest, src, len, ps, l) +#define __libcpp_snprintf_l(...) snprintf_l(__VA_ARGS__) +#define __libcpp_asprintf_l(...) asprintf_l(__VA_ARGS__) +#define __libcpp_sscanf_l(...) sscanf_l(__VA_ARGS__) + +#endif // _LIBCUDACXX_BSD_LOCALE_DEFAULTS_H diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bsd_locale_fallbacks.h b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bsd_locale_fallbacks.h new file mode 100644 index 000000000000..58220977d76d --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__bsd_locale_fallbacks.h @@ -0,0 +1,139 @@ +// -*- C++ -*- +//===---------------------- __bsd_locale_fallbacks.h ----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// The BSDs have lots of *_l functions. This file provides reimplementations +// of those functions for non-BSD platforms. 
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCUDACXX_BSD_LOCALE_FALLBACKS_DEFAULTS_H
+#define _LIBCUDACXX_BSD_LOCALE_FALLBACKS_DEFAULTS_H
+
+#include <stdlib.h>
+#include <stdarg.h>
+#include <memory>
+
+#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCUDACXX_BEGIN_NAMESPACE_STD
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+decltype(MB_CUR_MAX) __libcpp_mb_cur_max_l(locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return MB_CUR_MAX;
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+wint_t __libcpp_btowc_l(int __c, locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return btowc(__c);
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+int __libcpp_wctob_l(wint_t __c, locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return wctob(__c);
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+size_t __libcpp_wcsnrtombs_l(char *__dest, const wchar_t **__src, size_t __nwc,
+                             size_t __len, mbstate_t *__ps, locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return wcsnrtombs(__dest, __src, __nwc, __len, __ps);
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+size_t __libcpp_wcrtomb_l(char *__s, wchar_t __wc, mbstate_t *__ps, locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return wcrtomb(__s, __wc, __ps);
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+size_t __libcpp_mbsnrtowcs_l(wchar_t * __dest, const char **__src, size_t __nms,
+                             size_t __len, mbstate_t *__ps, locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return mbsnrtowcs(__dest, __src, __nms, __len, __ps);
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+size_t __libcpp_mbrtowc_l(wchar_t *__pwc, const char *__s, size_t __n,
+                          mbstate_t *__ps, locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return mbrtowc(__pwc, __s, __n, __ps);
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+int __libcpp_mbtowc_l(wchar_t *__pwc, const char *__pmb, size_t __max, locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return mbtowc(__pwc, __pmb, __max);
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+size_t __libcpp_mbrlen_l(const char *__s, size_t __n, mbstate_t *__ps, locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return mbrlen(__s, __n, __ps);
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+lconv *__libcpp_localeconv_l(locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return localeconv();
+}
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+size_t __libcpp_mbsrtowcs_l(wchar_t *__dest, const char **__src, size_t __len,
+                            mbstate_t *__ps, locale_t __l)
+{
+    __libcpp_locale_guard __current(__l);
+    return mbsrtowcs(__dest, __src, __len, __ps);
+}
+
+inline
+int __libcpp_snprintf_l(char *__s, size_t __n, locale_t __l, const char *__format, ...) {
+    va_list __va;
+    va_start(__va, __format);
+    __libcpp_locale_guard __current(__l);
+    int __res = vsnprintf(__s, __n, __format, __va);
+    va_end(__va);
+    return __res;
+}
+
+inline
+int __libcpp_asprintf_l(char **__s, locale_t __l, const char *__format, ...) {
+    va_list __va;
+    va_start(__va, __format);
+    __libcpp_locale_guard __current(__l);
+    int __res = vasprintf(__s, __format, __va);
+    va_end(__va);
+    return __res;
+}
+
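[Editorial aside, not part of the vendored header: every fallback above leans on the same RAII pattern — switch the thread's locale for one libc call, restore it on scope exit. The real __libcpp_locale_guard is defined elsewhere in these sources; this is only an approximation of the idea using POSIX-2008 uselocale(), with hypothetical names.]

#include <locale.h>
#include <wchar.h>

// Hypothetical stand-in for __libcpp_locale_guard: install `loc` as the
// calling thread's locale, remember the old one, restore it in the destructor.
struct locale_guard {
    locale_t old_;
    explicit locale_guard(locale_t loc) : old_(uselocale(loc)) {}
    ~locale_guard() { uselocale(old_); }
};

// Usage in the style of the fallbacks above: one libc call under locale l.
int btowc_with_locale(int c, locale_t l)
{
    locale_guard guard(l);   // thread locale is `l` for the call below
    return btowc(c);         // previous locale restored when guard dies
}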
+inline
+int __libcpp_sscanf_l(const char *__s, locale_t __l, const char *__format, ...) {
+    va_list __va;
+    va_start(__va, __format);
+    __libcpp_locale_guard __current(__l);
+    int __res = vsscanf(__s, __format, __va);
+    va_end(__va);
+    return __res;
+}
+
+_LIBCUDACXX_END_NAMESPACE_STD
+
+#endif // _LIBCUDACXX_BSD_LOCALE_FALLBACKS_DEFAULTS_H
diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__config b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__config
new file mode 100644
index 000000000000..2714477eca5b
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__config
@@ -0,0 +1,1943 @@
+// -*- C++ -*-
+//===--------------------------- __config ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCUDACXX_CONFIG
+#define _LIBCUDACXX_CONFIG
+
+#if defined(_MSC_VER) && !defined(__clang__)
+  #define _LIBCUDACXX_HAS_PRAGMA_MSVC_WARNING
+  #if !defined(_LIBCUDACXX_DISABLE_PRAGMA_MSVC_WARNING)
+    #define _LIBCUDACXX_USE_PRAGMA_MSVC_WARNING
+  #endif
+#else
+  #define _LIBCUDACXX_HAS_PRAGMA_GCC_SYSTEM_HEADER
+  #if !defined(_LIBCUDACXX_DISABLE_PRAGMA_GCC_SYSTEM_HEADER)
+    #define _LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER
+  #endif
+#endif
+
+#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+#ifdef __cplusplus
+
+// __config may be included in `extern "C"` contexts, switch back to include <nv/target>
+extern "C++" {
+#include <nv/target>
+}
+
+#ifdef __GNUC__
+# define _GNUC_VER (__GNUC__ * 100 + __GNUC_MINOR__)
+// The _GNUC_VER_NEW macro better represents the new GCC versioning scheme
+// introduced in GCC 5.0.
+# define _GNUC_VER_NEW (_GNUC_VER * 10 + __GNUC_PATCHLEVEL__) +#else +# define _GNUC_VER 0 +# define _GNUC_VER_NEW 0 +#endif + +#define _LIBCUDACXX_VERSION 10000 + +#ifndef _LIBCUDACXX_ABI_VERSION +# define _LIBCUDACXX_ABI_VERSION 1 +#endif + +#ifndef __STDC_HOSTED__ +# define _LIBCUDACXX_FREESTANDING +#endif + +#ifndef _LIBCUDACXX_STD_VER +# if defined(_MSC_VER) +# if _MSVC_LANG <= 201103L +# define _LIBCUDACXX_STD_VER 11 +# elif _MSVC_LANG <= 201402L +# define _LIBCUDACXX_STD_VER 14 +# elif _MSVC_LANG <= 201703L +# define _LIBCUDACXX_STD_VER 17 +# else +# define _LIBCUDACXX_STD_VER 19 // current year, or date of c++2a ratification +# endif +# else +# if __cplusplus <= 201103L +# define _LIBCUDACXX_STD_VER 11 +# elif __cplusplus <= 201402L +# define _LIBCUDACXX_STD_VER 14 +# elif __cplusplus <= 201703L +# define _LIBCUDACXX_STD_VER 17 +# else +# define _LIBCUDACXX_STD_VER 19 // current year, or date of c++2a ratification +# endif +# endif +#endif // _LIBCUDACXX_STD_VER + +#if _LIBCUDACXX_STD_VER < 11 +#define _LIBCUDACXX_CXX03_LANG +#endif + +#if defined(__PGIC__) && defined(__linux__) + #define __ELF__ +#endif + +#if defined(__ELF__) +# define _LIBCUDACXX_OBJECT_FORMAT_ELF 1 +#elif defined(__MACH__) +# define _LIBCUDACXX_OBJECT_FORMAT_MACHO 1 +#elif defined(_WIN32) +# define _LIBCUDACXX_OBJECT_FORMAT_COFF 1 +#elif defined(__wasm__) +# define _LIBCUDACXX_OBJECT_FORMAT_WASM 1 +#else +# error Unknown object file format +#endif + +#if defined(_LIBCUDACXX_ABI_UNSTABLE) || _LIBCUDACXX_ABI_VERSION >= 2 +// Change short string representation so that string data starts at offset 0, +// improving its alignment in some cases. +# define _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT +// Fix deque iterator type in order to support incomplete types. +# define _LIBCUDACXX_ABI_INCOMPLETE_TYPES_IN_DEQUE +// Fix undefined behavior in how std::list stores its linked nodes. +# define _LIBCUDACXX_ABI_LIST_REMOVE_NODE_POINTER_UB +// Fix undefined behavior in how __tree stores its end and parent nodes. +# define _LIBCUDACXX_ABI_TREE_REMOVE_NODE_POINTER_UB +// Fix undefined behavior in how __hash_table stores its pointer types. +# define _LIBCUDACXX_ABI_FIX_UNORDERED_NODE_POINTER_UB +# define _LIBCUDACXX_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB +# define _LIBCUDACXX_ABI_FIX_UNORDERED_CONTAINER_SIZE_TYPE +// Don't use a nullptr_t simulation type in C++03 instead using C++11 nullptr +// provided under the alternate keyword __nullptr, which changes the mangling +// of nullptr_t. This option is ABI incompatible with GCC in C++03 mode. +# define _LIBCUDACXX_ABI_ALWAYS_USE_CXX11_NULLPTR +// Define the `pointer_safety` enum as a C++11 strongly typed enumeration +// instead of as a class simulating an enum. If this option is enabled +// `pointer_safety` and `get_pointer_safety()` will no longer be available +// in C++03. +# define _LIBCUDACXX_ABI_POINTER_SAFETY_ENUM_TYPE +// Define a key function for `bad_function_call` in the library, to centralize +// its vtable and typeinfo to libc++ rather than having all other libraries +// using that class define their own copies. +# define _LIBCUDACXX_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION +// Enable optimized version of __do_get_(un)signed which avoids redundant copies. +# define _LIBCUDACXX_ABI_OPTIMIZED_LOCALE_NUM_GET +// Use the smallest possible integer type to represent the index of the variant. +// Previously libc++ used "unsigned int" exclusively. 
+# define _LIBCUDACXX_ABI_VARIANT_INDEX_TYPE_OPTIMIZATION +// Unstable attempt to provide a more optimized std::function +# define _LIBCUDACXX_ABI_OPTIMIZED_FUNCTION +// All the regex constants must be distinct and nonzero. +# define _LIBCUDACXX_ABI_REGEX_CONSTANTS_NONZERO +#elif _LIBCUDACXX_ABI_VERSION == 1 +# if !defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) +// Enable compiling copies of now inline methods into the dylib to support +// applications compiled against older libraries. This is unnecessary with +// COFF dllexport semantics, since dllexport forces a non-inline definition +// of inline functions to be emitted anyway. Our own non-inline copy would +// conflict with the dllexport-emitted copy, so we disable it. +# define _LIBCUDACXX_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS +# endif +// Feature macros for disabling pre ABI v1 features. All of these options +// are deprecated. +# if defined(__FreeBSD__) +# define _LIBCUDACXX_DEPRECATED_ABI_DISABLE_PAIR_TRIVIAL_COPY_CTOR +# endif +#endif + +#ifdef _LIBCUDACXX_TRIVIAL_PAIR_COPY_CTOR +#error "_LIBCUDACXX_TRIVIAL_PAIR_COPY_CTOR" is no longer supported. \ + use _LIBCUDACXX_DEPRECATED_ABI_DISABLE_PAIR_TRIVIAL_COPY_CTOR instead +#endif + +#define _LIBCUDACXX_CONCAT1(_LIBCUDACXX_X,_LIBCUDACXX_Y) _LIBCUDACXX_X##_LIBCUDACXX_Y +#define _LIBCUDACXX_CONCAT(_LIBCUDACXX_X,_LIBCUDACXX_Y) _LIBCUDACXX_CONCAT1(_LIBCUDACXX_X,_LIBCUDACXX_Y) + +#ifndef _LIBCUDACXX_ABI_NAMESPACE +# define _LIBCUDACXX_ABI_NAMESPACE _LIBCUDACXX_CONCAT(__,_LIBCUDACXX_ABI_VERSION) +#endif + +#ifndef __has_attribute +#define __has_attribute(__x) 0 +#endif + +#ifndef __has_builtin +#define __has_builtin(__x) 0 +#endif + +#ifndef __has_extension +#define __has_extension(__x) 0 +#endif + +#ifndef __has_feature +#define __has_feature(__x) 0 +#endif + +#ifndef __has_cpp_attribute +#define __has_cpp_attribute(__x) 0 +#endif + +// '__is_identifier' returns '0' if '__x' is a reserved identifier provided by +// the compiler and '1' otherwise. +#ifndef __is_identifier +#define __is_identifier(__x) 1 +#endif + +#ifndef __has_declspec_attribute +#define __has_declspec_attribute(__x) 0 +#endif + +#define __has_keyword(__x) !(__is_identifier(__x)) + +#ifndef __has_include +#define __has_include(...) 0 +#endif + +#if defined(__PGIC__) +# define _LIBCUDACXX_COMPILER_PGI +#elif defined(__clang__) +# define _LIBCUDACXX_COMPILER_CLANG +# ifndef __apple_build_version__ +# define _LIBCUDACXX_CLANG_VER (__clang_major__ * 100 + __clang_minor__) +# endif +#elif defined(__GNUC__) +# define _LIBCUDACXX_COMPILER_GCC +#elif defined(_MSC_VER) +# define _LIBCUDACXX_COMPILER_MSVC +#elif defined(__IBMCPP__) +# define _LIBCUDACXX_COMPILER_IBM +#elif defined(__CUDACC_RTC__) +# define _LIBCUDACXX_COMPILER_NVRTC +#endif + +#if defined(__NVCC__) +// This is not mutually exclusive with other compilers, as NVCC uses a host +// compiler. +# define _LIBCUDACXX_COMPILER_NVCC +#endif + +#if !defined(_LIBCUDACXX_COMPILER_NVCC) && !defined(_LIBCUDACXX_COMPILER_NVRTC) +// If NVCC is not being used can safely use `long double` without warnings +# define _LIBCUDACXX_HAS_COMPLEX_LONG_DOUBLE +// NVCC does not have a way of silencing non '_' prefixed UDLs +# define _LIBCUDACXX_HAS_STL_LITERALS +#endif + +#if defined(_LIBCUDACXX_COMPILER_GCC) && __cplusplus < 201103L +#error "libc++ does not support using GCC with C++03. Please enable C++11" +#endif + +// FIXME: ABI detection should be done via compiler builtin macros. This +// is just a placeholder until Clang implements such macros. 
For now assume
+// that Windows compilers pretending to be MSVC++ target the Microsoft ABI,
+// and allow the user to explicitly specify the ABI to handle cases where this
+// heuristic falls short.
+#if defined(_LIBCUDACXX_ABI_FORCE_ITANIUM) && defined(_LIBCUDACXX_ABI_FORCE_MICROSOFT)
+#  error "Only one of _LIBCUDACXX_ABI_FORCE_ITANIUM and _LIBCUDACXX_ABI_FORCE_MICROSOFT can be defined"
+#elif defined(_LIBCUDACXX_ABI_FORCE_ITANIUM)
+#  define _LIBCUDACXX_ABI_ITANIUM
+#elif defined(_LIBCUDACXX_ABI_FORCE_MICROSOFT)
+#  define _LIBCUDACXX_ABI_MICROSOFT
+#else
+#  if defined(_WIN32) && defined(_MSC_VER)
+#    define _LIBCUDACXX_ABI_MICROSOFT
+#  else
+#    define _LIBCUDACXX_ABI_ITANIUM
+#  endif
+#endif
+
+#if defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_NO_VCRUNTIME)
+#  define _LIBCUDACXX_ABI_VCRUNTIME
+#endif
+
+// Need to detect which libc we're using if we're on Linux.
+#if defined(__linux__)
+#  include <features.h>
+#  if defined(__GLIBC_PREREQ)
+#    define _LIBCUDACXX_GLIBC_PREREQ(a, b) __GLIBC_PREREQ(a, b)
+#  else
+#    define _LIBCUDACXX_GLIBC_PREREQ(a, b) 0
+#  endif // defined(__GLIBC_PREREQ)
+#endif // defined(__linux__)
+
+#ifdef __LITTLE_ENDIAN__
+#  if __LITTLE_ENDIAN__
+#    define _LIBCUDACXX_LITTLE_ENDIAN
+#  endif // __LITTLE_ENDIAN__
+#endif // __LITTLE_ENDIAN__
+
+#ifdef __BIG_ENDIAN__
+#  if __BIG_ENDIAN__
+#    define _LIBCUDACXX_BIG_ENDIAN
+#  endif // __BIG_ENDIAN__
+#endif // __BIG_ENDIAN__
+
+#ifdef __BYTE_ORDER__
+#  if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#    define _LIBCUDACXX_LITTLE_ENDIAN
+#  elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#    define _LIBCUDACXX_BIG_ENDIAN
+#  endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#endif // __BYTE_ORDER__
+
+#ifdef __FreeBSD__
+#  include <sys/endian.h>
+#  if _BYTE_ORDER == _LITTLE_ENDIAN
+#    define _LIBCUDACXX_LITTLE_ENDIAN
+#  else // _BYTE_ORDER == _LITTLE_ENDIAN
+#    define _LIBCUDACXX_BIG_ENDIAN
+#  endif // _BYTE_ORDER == _LITTLE_ENDIAN
+#  ifndef __LONG_LONG_SUPPORTED
+#    define _LIBCUDACXX_HAS_NO_LONG_LONG
+#  endif // __LONG_LONG_SUPPORTED
+#endif // __FreeBSD__
+
+#ifdef __NetBSD__
+#  include <sys/endian.h>
+#  if _BYTE_ORDER == _LITTLE_ENDIAN
+#    define _LIBCUDACXX_LITTLE_ENDIAN
+#  else // _BYTE_ORDER == _LITTLE_ENDIAN
+#    define _LIBCUDACXX_BIG_ENDIAN
+#  endif // _BYTE_ORDER == _LITTLE_ENDIAN
+#  define _LIBCUDACXX_HAS_QUICK_EXIT
+#endif // __NetBSD__
+
+#if defined(_WIN32)
+#  define _LIBCUDACXX_WIN32API
+#  define _LIBCUDACXX_LITTLE_ENDIAN
+#  define _LIBCUDACXX_SHORT_WCHAR 1
+// Both MinGW and native MSVC provide a "MSVC"-like environment
+#  define _LIBCUDACXX_MSVCRT_LIKE
+// If mingw not explicitly detected, assume using MS C runtime only if
+// a MS compatibility version is specified.
+#  if defined(_MSC_VER) && !defined(__MINGW32__)
+#    define _LIBCUDACXX_MSVCRT // Using Microsoft's C Runtime library
+#  endif
+#  if (defined(_M_AMD64) || defined(__x86_64__)) || (defined(_M_ARM) || defined(__arm__))
+#    define _LIBCUDACXX_HAS_BITSCAN64
+#  endif
+#  define _LIBCUDACXX_HAS_OPEN_WITH_WCHAR
+#  if defined(_LIBCUDACXX_MSVCRT)
+#    define _LIBCUDACXX_HAS_QUICK_EXIT
+#  endif
+
+// Some CRT APIs are unavailable to store apps
+#  if defined(WINAPI_FAMILY)
+#    include <winapifamily.h>
+#    if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && \
+        (!defined(WINAPI_PARTITION_SYSTEM) || \
+         !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_SYSTEM))
+#      define _LIBCUDACXX_WINDOWS_STORE_APP
+#    endif
+#  endif
+#endif // defined(_WIN32)
+
+#ifdef __sun__
+#  include <sys/isa_defs.h>
+#  ifdef _LITTLE_ENDIAN
+#    define _LIBCUDACXX_LITTLE_ENDIAN
+#  else
+#    define _LIBCUDACXX_BIG_ENDIAN
+#  endif
+#endif // __sun__
+
+#if defined(__CloudABI__)
+  // Certain architectures provide arc4random(). Prefer using
+  // arc4random() over /dev/{u,}random to make it possible to obtain
+  // random data even when using sandboxing mechanisms such as chroots,
+  // Capsicum, etc.
+#  define _LIBCUDACXX_USING_ARC4_RANDOM
+#elif defined(__Fuchsia__) || defined(__wasi__)
+#  define _LIBCUDACXX_USING_GETENTROPY
+#elif defined(__native_client__)
+  // NaCl's sandbox (which PNaCl also runs in) doesn't allow filesystem access,
+  // including accesses to the special files under /dev. C++11's
+  // std::random_device is instead exposed through a NaCl syscall.
+#  define _LIBCUDACXX_USING_NACL_RANDOM
+#elif defined(_LIBCUDACXX_WIN32API)
+#  define _LIBCUDACXX_USING_WIN32_RANDOM
+#else
+#  define _LIBCUDACXX_USING_DEV_RANDOM
+#endif
+
+#if !defined(_LIBCUDACXX_LITTLE_ENDIAN) && !defined(_LIBCUDACXX_BIG_ENDIAN)
+#  include <endian.h>
+#  if __BYTE_ORDER == __LITTLE_ENDIAN
+#    define _LIBCUDACXX_LITTLE_ENDIAN
+#  elif __BYTE_ORDER == __BIG_ENDIAN
+#    define _LIBCUDACXX_BIG_ENDIAN
+#  else // __BYTE_ORDER == __BIG_ENDIAN
+#    error unable to determine endian
+#  endif
+#endif // !defined(_LIBCUDACXX_LITTLE_ENDIAN) && !defined(_LIBCUDACXX_BIG_ENDIAN)
+
+#if __has_attribute(__no_sanitize__) && !defined(_LIBCUDACXX_COMPILER_GCC)
+#  define _LIBCUDACXX_NO_CFI __attribute__((__no_sanitize__("cfi")))
+#else
+#  define _LIBCUDACXX_NO_CFI
+#endif
+
+#if __ISO_C_VISIBLE >= 2011 || __cplusplus >= 201103L
+#  if defined(__FreeBSD__)
+#    define _LIBCUDACXX_HAS_QUICK_EXIT
+#    define _LIBCUDACXX_HAS_C11_FEATURES
+#  elif defined(__Fuchsia__) || defined(__wasi__)
+#    define _LIBCUDACXX_HAS_QUICK_EXIT
+#    define _LIBCUDACXX_HAS_TIMESPEC_GET
+#    define _LIBCUDACXX_HAS_C11_FEATURES
+#  elif defined(__linux__)
+#    if !defined(_LIBCUDACXX_HAS_MUSL_LIBC)
+#      if _LIBCUDACXX_GLIBC_PREREQ(2, 15) || defined(__BIONIC__)
+#        define _LIBCUDACXX_HAS_QUICK_EXIT
+#      endif
+#      if _LIBCUDACXX_GLIBC_PREREQ(2, 17)
+#        define _LIBCUDACXX_HAS_C11_FEATURES
+#        define _LIBCUDACXX_HAS_TIMESPEC_GET
+#      endif
+#    else // defined(_LIBCUDACXX_HAS_MUSL_LIBC)
+#      define _LIBCUDACXX_HAS_QUICK_EXIT
+#      define _LIBCUDACXX_HAS_TIMESPEC_GET
+#      define _LIBCUDACXX_HAS_C11_FEATURES
+#    endif
+#  endif // __linux__
+#endif
+
+#ifndef _LIBCUDACXX_CXX03_LANG
+#  define _LIBCUDACXX_ALIGNOF(_Tp) alignof(_Tp)
+#elif defined(_LIBCUDACXX_COMPILER_CLANG)
+#  define _LIBCUDACXX_ALIGNOF(_Tp) _Alignof(_Tp)
+#else
+// This definition is potentially buggy, but it's only taken with GCC in C++03,
+// which we barely support anyway.
See llvm.org/PR39713 +# define _LIBCUDACXX_ALIGNOF(_Tp) __alignof(_Tp) +#endif + +#define _LIBCUDACXX_PREFERRED_ALIGNOF(_Tp) __alignof(_Tp) + +#if defined(_LIBCUDACXX_COMPILER_NVRTC) +#define _LIBCUDACXX_OFFSET_IS_ZERO(type, member) !(&(((type *)0)->member)) +#else +#define _LIBCUDACXX_OFFSET_IS_ZERO(type, member) !offsetof(type, member) +#endif + +#if defined(_LIBCUDACXX_COMPILER_CLANG) + +// _LIBCUDACXX_ALTERNATE_STRING_LAYOUT is an old name for +// _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT left here for backward compatibility. +#if (defined(__APPLE__) && !defined(__i386__) && !defined(__x86_64__) && \ + (!defined(__arm__) || __ARM_ARCH_7K__ >= 2)) || \ + defined(_LIBCUDACXX_ALTERNATE_STRING_LAYOUT) +#define _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT +#endif + +#if __has_feature(cxx_alignas) +# define _ALIGNAS_TYPE(x) alignas(x) +# define _ALIGNAS(x) alignas(x) +#else +# define _ALIGNAS_TYPE(x) __attribute__((__aligned__(_LIBCUDACXX_ALIGNOF(x)))) +# define _ALIGNAS(x) __attribute__((__aligned__(x))) +#endif + +#if __cplusplus < 201103L +typedef __char16_t char16_t; +typedef __char32_t char32_t; +#endif + +#if !(__has_feature(cxx_exceptions)) && !defined(_LIBCUDACXX_NO_EXCEPTIONS) +#define _LIBCUDACXX_NO_EXCEPTIONS +#endif + +#if !(__has_feature(cxx_rtti)) && !defined(_LIBCUDACXX_NO_RTTI) +#define _LIBCUDACXX_NO_RTTI +#endif + +#if !(__has_feature(cxx_strong_enums)) +#define _LIBCUDACXX_HAS_NO_STRONG_ENUMS +#endif + +#if __has_feature(cxx_attributes) +# define _LIBCUDACXX_NORETURN [[noreturn]] +#else +# define _LIBCUDACXX_NORETURN __attribute__ ((noreturn)) +#endif + +#if !(__has_feature(cxx_lambdas)) +#define _LIBCUDACXX_HAS_NO_LAMBDAS +#endif + +#if !(__has_feature(cxx_nullptr)) +# if (__has_extension(cxx_nullptr) || __has_keyword(__nullptr)) && defined(_LIBCUDACXX_ABI_ALWAYS_USE_CXX11_NULLPTR) +# define nullptr __nullptr +# else +# define _LIBCUDACXX_HAS_NO_NULLPTR +# endif +#endif + +#if !(__has_feature(cxx_rvalue_references)) +#define _LIBCUDACXX_HAS_NO_RVALUE_REFERENCES +#endif + +#if !(__has_feature(cxx_auto_type)) +#define _LIBCUDACXX_HAS_NO_AUTO_TYPE +#endif + +#if !(__has_feature(cxx_variadic_templates)) +#define _LIBCUDACXX_HAS_NO_VARIADICS +#endif + +#if !(__has_feature(cxx_generalized_initializers)) +#define _LIBCUDACXX_HAS_NO_GENERALIZED_INITIALIZERS +#endif + +#if __has_feature(is_base_of) +#define _LIBCUDACXX_IS_BASE_OF(...) __is_base_of(__VA_ARGS__) +#endif + +#if __has_feature(is_final) +#define _LIBCUDACXX_IS_FINAL(...) __is_final(__VA_ARGS__) +#endif + +// Objective-C++ features (opt-in) +#if __has_feature(objc_arc) +#define _LIBCUDACXX_HAS_OBJC_ARC +#endif + +#if __has_feature(objc_arc_weak) +#define _LIBCUDACXX_HAS_OBJC_ARC_WEAK +#endif + +#if !(__has_feature(cxx_relaxed_constexpr)) +#define _LIBCUDACXX_HAS_NO_CXX14_CONSTEXPR +#endif + +#if !(__has_feature(cxx_variable_templates)) +#define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES +#endif + +#if !(__has_feature(cxx_noexcept)) +#define _LIBCUDACXX_HAS_NO_NOEXCEPT +#endif + +#if __has_feature(underlying_type) +#define _LIBCUDACXX_UNDERLYING_TYPE(...) __underlying_type(__VA_ARGS__) +#endif + +#if __has_feature(is_constructible) +#define _LIBCUDACXX_IS_CONSTRUCTIBLE(...) __is_constructible(__VA_ARGS__) +#endif + +#if __has_keyword(__is_destructible) +#define _LIBCUDACXX_IS_DESTRUCTIBLE(...) __is_destructible(__VA_ARGS__) +#endif + +#if __has_feature(is_trivially_constructible) +#define _LIBCUDACXX_IS_TRIVIALLY_CONSTRUCTIBLE(...) 
__is_trivially_constructible(__VA_ARGS__) +#endif + +// https://bugs.llvm.org/show_bug.cgi?id=44517 +#if __has_keyword(__is_trivially_destructible) +#define _LIBCUDACXX_IS_TRIVIALLY_DESTRUCTIBLE(...) __is_trivially_destructible(__VA_ARGS__) +#endif + +// https://bugs.llvm.org/show_bug.cgi?id=44517 +#if __has_keyword(__is_nothrow_constructible) +#define _LIBCUDACXX_IS_NOTHROW_CONSTRUCTIBLE(...) __is_nothrow_constructible(__VA_ARGS__) +#endif + +// https://bugs.llvm.org/show_bug.cgi?id=44517 +#if __has_keyword(__is_nothrow_assignable) +#define _LIBCUDACXX_IS_NOTHROW_ASSIGNABLE(...) __is_nothrow_assignable(__VA_ARGS__) +#endif + +// https://bugs.llvm.org/show_bug.cgi?id=44517 +#if __has_keyword(__is_assignable) +#define _LIBCUDACXX_IS_ASSIGNABLE(...) __is_assignable(__VA_ARGS__) +#endif + +// https://bugs.llvm.org/show_bug.cgi?id=44517 +#if __has_keyword(__is_same) && !defined(_LIBCUDACXX_COMPILER_NVCC) +#define _LIBCUDACXX_IS_SAME(...) __is_same(__VA_ARGS__) +#endif + +#if __has_feature(has_trivial_constructor) +#define _LIBCUDACXX_HAS_TRIVIAL_CONSTRUCTOR(...) __has_trivial_constructor(__VA_ARGS__) +#endif + +#if __has_feature(has_nothrow_constructor) +#define _LIBCUDACXX_HAS_NOTHROW_CONSTRUCTOR(...) __has_nothrow_constructor(__VA_ARGS__) +#endif + +#if __has_feature(has_nothrow_copy) +#define _LIBCUDACXX_HAS_NOTHROW_COPY(...) __has_nothrow_copy(__VA_ARGS__) +#endif + +#if __has_feature(has_nothrow_assign) +#define _LIBCUDACXX_HAS_NOTHROW_ASSIGN(...) __has_nothrow_assign(__VA_ARGS__) +#endif + +#if __has_feature(has_trivial_destructor) +#define _LIBCUDACXX_HAS_TRIVIAL_DESTRUCTOR(...) __has_trivial_destructor(__VA_ARGS__) +#endif + +#if __has_feature(is_trivially_assignable) +#define _LIBCUDACXX_IS_TRIVIALLY_ASSIGNABLE(...) __is_trivially_assignable(__VA_ARGS__) +#endif + +#if __has_feature(is_trivially_copyable) +#define _LIBCUDACXX_IS_TRIVIALLY_COPYABLE(...) __is_trivially_copyable(__VA_ARGS__) +#endif + +#if __has_feature(is_trivial) +#define _LIBCUDACXX_IS_TRIVIAL(...) __is_trivial(__VA_ARGS__) +#endif + +#if __has_feature(is_convertible_to) +#define _LIBCUDACXX_IS_CONVERTIBLE_TO(...) __is_convertible_to(__VA_ARGS__) +#endif + +// https://bugs.llvm.org/show_bug.cgi?id=44517 +#if __has_keyword(__is_function) && !defined(_LIBCUDACXX_COMPILER_NVCC) +#define _LIBCUDACXX_IS_FUNCTION(...) __is_function(__VA_ARGS__) +#endif + +#if __has_feature(is_union) +#define _LIBCUDACXX_IS_UNION(...) __is_union(__VA_ARGS__) +#endif + +#if __has_feature(is_class) +#define _LIBCUDACXX_IS_CLASS(...) __is_class(__VA_ARGS__) +#endif + +#if __has_keyword(__is_aggregate) +#define _LIBCUDACXX_IS_AGGREGATE(...) __is_aggregate(__VA_ARGS__) +#endif + +#if __has_feature(is_pod) +#define _LIBCUDACXX_IS_POD(...) __is_pod(__VA_ARGS__) +#endif + +#if __has_feature(is_standard_layout) +#define _LIBCUDACXX_IS_STANDARD_LAYOUT(...) __is_standard_layout(__VA_ARGS__) +#endif + +#if __has_feature(is_enum) +#define _LIBCUDACXX_IS_ENUM(...) __is_enum(__VA_ARGS__) +#endif + +#if __has_feature(is_empty) +#define _LIBCUDACXX_IS_EMPTY(...) __is_empty(__VA_ARGS__) +#endif + +#if __has_feature(is_polymorphic) +#define _LIBCUDACXX_IS_POLYMORPHIC(...) __is_polymorphic(__VA_ARGS__) +#endif + +#if __has_feature(has_virtual_destructor) +#define _LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR(...) __has_virtual_destructor(__VA_ARGS__) +#endif + +#if __has_feature(is_literal) +#define _LIBCUDACXX_IS_LITERAL(...) 
__is_literal(__VA_ARGS__) +#endif + +#if !defined(_LIBCUDACXX_HAS_NO_ASAN) && !__has_feature(address_sanitizer) +#define _LIBCUDACXX_HAS_NO_ASAN +#endif + +// Allow for build-time disabling of unsigned integer sanitization +#if !defined(_LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK) && __has_attribute(no_sanitize) +#define _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK __attribute__((__no_sanitize__("unsigned-integer-overflow"))) +#endif + +#if __has_builtin(__builtin_launder) +#define _LIBCUDACXX_LAUNDER(...) __builtin_launder(__VA_ARGS__) +#endif + +#if !__is_identifier(__has_unique_object_representations) +#define _LIBCUDACXX_HAS_UNIQUE_OBJECT_REPRESENTATIONS(...) __has_unique_object_representations(__VA_ARGS__) +#endif + +#if !defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) +#define _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(...) __atomic_always_lock_free(__VA_ARGS__) +#endif + +// NVCC in C++11 mode freaks out about `__builtin_is_constant_evaluated`. +#if __has_builtin(__builtin_is_constant_evaluated) && !(defined(_LIBCUDACXX_COMPILER_NVCC) && _LIBCUDACXX_STD_VER < 14) +#define _LIBCUDACXX_IS_CONSTANT_EVALUATED(...) __builtin_is_constant_evaluated(__VA_ARGS__) +#endif + +#if __has_builtin(__builtin_addressof) +#define _LIBCUDACXX_ADDRESSOF(...) __builtin_addressof(__VA_ARGS__) +#endif + +#define _LIBCUDACXX_ALWAYS_INLINE __attribute__ ((__always_inline__)) + +// Literal operators ""d and ""y are supported starting with LLVM Clang 8 and AppleClang 10.0.1 +#if (defined(_LIBCUDACXX_CLANG_VER) && _LIBCUDACXX_CLANG_VER < 800) || \ + (defined(__apple_build_version__) && __apple_build_version__ < 10010000) +#define _LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS +#endif + +#if __has_feature(cxx_reference_qualified_functions) +#define _LIBCUDACXX_HAS_REFERENCE_QUALIFIED_FUNCTIONS +#endif + +#define _LIBCUDACXX_DISABLE_EXTENSION_WARNING __extension__ + +#elif defined(_LIBCUDACXX_COMPILER_GCC) + +#define _ALIGNAS(x) __attribute__((__aligned__(x))) +#define _ALIGNAS_TYPE(x) __attribute__((__aligned__(_LIBCUDACXX_ALIGNOF(x)))) + +#define _LIBCUDACXX_NORETURN __attribute__((noreturn)) + +#if _GNUC_VER >= 403 +#define _LIBCUDACXX_IS_UNION(...) __is_union(__VA_ARGS__) +#define _LIBCUDACXX_IS_CLASS(...) __is_class(__VA_ARGS__) +#define _LIBCUDACXX_IS_POD(...) __is_pod(__VA_ARGS__) +#define _LIBCUDACXX_IS_ENUM(...) __is_enum(__VA_ARGS__) +#define _LIBCUDACXX_IS_EMPTY(...) __is_empty(__VA_ARGS__) +#define _LIBCUDACXX_IS_POLYMORPHIC(...) __is_polymorphic(__VA_ARGS__) +#define _LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR(...) __has_virtual_destructor(__VA_ARGS__) +#define _LIBCUDACXX_IS_BASE_OF(...) __is_base_of(__VA_ARGS__) +#define _LIBCUDACXX_HAS_TRIVIAL_CONSTRUCTOR(...) __has_trivial_constructor(__VA_ARGS__) +#define _LIBCUDACXX_HAS_NOTHROW_CONSTRUCTOR(...) __has_nothrow_constructor(__VA_ARGS__) +#define _LIBCUDACXX_HAS_NOTHROW_COPY(...) __has_nothrow_copy(__VA_ARGS__) +#define _LIBCUDACXX_HAS_NOTHROW_ASSIGN(...) __has_nothrow_assign(__VA_ARGS__) +#define _LIBCUDACXX_HAS_TRIVIAL_DESTRUCTOR(...) __has_trivial_destructor(__VA_ARGS__) +#endif + +#if _GNUC_VER >= 405 +#define _LIBCUDACXX_IS_TRIVIAL(...) __is_trivial(__VA_ARGS__) +#endif + +#if _GNUC_VER >= 406 +#define _LIBCUDACXX_IS_LITERAL(...) __is_literal_type(__VA_ARGS__) +#endif + +#if _GNUC_VER >= 407 +#define _LIBCUDACXX_UNDERLYING_TYPE(...) __underlying_type(__VA_ARGS__) +#define _LIBCUDACXX_IS_FINAL(...) __is_final(__VA_ARGS__) +#define _LIBCUDACXX_IS_STANDARD_LAYOUT(...) 
__is_standard_layout(__VA_ARGS__) +#endif + +#if _GNUC_VER >= 409 +#define _LIBCUDACXX_HAS_REFERENCE_QUALIFIED_FUNCTIONS +#endif + +#if _GNUC_VER >= 501 +#define _LIBCUDACXX_IS_TRIVIALLY_CONSTRUCTIBLE(...) __is_trivially_constructible(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIALLY_ASSIGNABLE(...) __is_trivially_assignable(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIALLY_COPYABLE(...) __is_trivially_copyable(__VA_ARGS__) +#endif + +// FIXME: GCC 8.0 supports this trait, but it has a bug. +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91592 +// https://godbolt.org/z/IljfIw +//#if _GNUC_VER >= 800 +//#define _LIBCUDACXX_IS_ASSIGNABLE(...) __is_assignable(__VA_ARGS__) +//#endif + +#if _GNUC_VER >= 1000 +// __is_same_as is useless: GCC complains when you try to use it in a SFINAE condition +// which is the primary place where libc++ tries to use it. Disable. +// #define _LIBCUDACXX_IS_SAME(...) __is_same_as(__VA_ARGS__) +#endif + +#if !__EXCEPTIONS && !defined(_LIBCUDACXX_NO_EXCEPTIONS) +#define _LIBCUDACXX_NO_EXCEPTIONS +#endif + +// Determine if GCC supports relaxed constexpr +#if !defined(__cpp_constexpr) || __cpp_constexpr < 201304L +#define _LIBCUDACXX_HAS_NO_CXX14_CONSTEXPR +#endif + +// GCC 5 supports variable templates +#if !defined(__cpp_variable_templates) || __cpp_variable_templates < 201304L +#define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES +#endif + +#ifndef __GXX_EXPERIMENTAL_CXX0X__ + +#define _LIBCUDACXX_HAS_NO_DECLTYPE +#define _LIBCUDACXX_HAS_NO_NULLPTR +#define _LIBCUDACXX_HAS_NO_UNICODE_CHARS +#define _LIBCUDACXX_HAS_NO_VARIADICS +#define _LIBCUDACXX_HAS_NO_RVALUE_REFERENCES +#define _LIBCUDACXX_HAS_NO_STRONG_ENUMS +#define _LIBCUDACXX_HAS_NO_NOEXCEPT + +#else // __GXX_EXPERIMENTAL_CXX0X__ + +#if _GNUC_VER < 403 +#define _LIBCUDACXX_HAS_NO_RVALUE_REFERENCES +#endif + +#if _GNUC_VER < 404 +#define _LIBCUDACXX_HAS_NO_DECLTYPE +#define _LIBCUDACXX_HAS_NO_UNICODE_CHARS +#define _LIBCUDACXX_HAS_NO_VARIADICS +#define _LIBCUDACXX_HAS_NO_GENERALIZED_INITIALIZERS +#endif // _GNUC_VER < 404 + +#if _GNUC_VER < 406 +#define _LIBCUDACXX_HAS_NO_NOEXCEPT +#define _LIBCUDACXX_HAS_NO_NULLPTR +#endif + +#endif // __GXX_EXPERIMENTAL_CXX0X__ + +#if !defined(_LIBCUDACXX_HAS_NO_ASAN) && !defined(__SANITIZE_ADDRESS__) +#define _LIBCUDACXX_HAS_NO_ASAN +#endif + +#if _GNUC_VER < 600 && defined(_LIBCUDACXX_COMPILER_NVCC) +#define _LIBCUDACXX_MISSING_GCC_MATH_INTRINSICS +#endif + +#if _GNUC_VER >= 700 +#define _LIBCUDACXX_LAUNDER(...) __builtin_launder(__VA_ARGS__) +#define _LIBCUDACXX_HAS_UNIQUE_OBJECT_REPRESENTATIONS(...) __has_unique_object_representations(__VA_ARGS__) +#define _LIBCUDACXX_IS_AGGREGATE(...) __is_aggregate(__VA_ARGS__) +#define _LIBCUDACXX_ADDRESSOF(...) __builtin_addressof(__VA_ARGS__) +#endif + +#if !defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) +#define _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(...) __atomic_always_lock_free(__VA_ARGS__) +#endif + +// NVCC in C++11 mode freaks out about `__builtin_is_constant_evaluated`. +#if _GNUC_VER >= 900 && !(defined(_LIBCUDACXX_COMPILER_NVCC) && _LIBCUDACXX_STD_VER < 14) +#define _LIBCUDACXX_IS_CONSTANT_EVALUATED(...) 
__builtin_is_constant_evaluated(__VA_ARGS__)
+#endif
+
+// NVCC cannot properly handle some deductions occurring within NOEXCEPT.
+// C++17 mode causes reference instantiation errors in tuple.
+#if (_GNUC_VER >= 702 && _GNUC_VER <= 805)
+#if defined(_LIBCUDACXX_COMPILER_NVCC) && _LIBCUDACXX_STD_VER == 17
+#define _LIBCUDACXX_NO_TUPLE_NOEXCEPT
+#endif
+#endif
+
+#define _LIBCUDACXX_ALWAYS_INLINE __attribute__ ((__always_inline__))
+
+#define _LIBCUDACXX_DISABLE_EXTENSION_WARNING __extension__
+
+#elif defined(_LIBCUDACXX_COMPILER_MSVC)
+
+#define _LIBCUDACXX_TOSTRING2(x) #x
+#define _LIBCUDACXX_TOSTRING(x) _LIBCUDACXX_TOSTRING2(x)
+#define _LIBCUDACXX_WARNING(x) __pragma(message(__FILE__ "(" _LIBCUDACXX_TOSTRING(__LINE__) ") : warning note: " x))
+
+#define _LIBCUDACXX_ADDRESSOF(...) __builtin_addressof(__VA_ARGS__)
+
+// https://github.com/microsoft/STL/blob/master/stl/inc/yvals_core.h#L353
+// warning C4100: 'quack': unreferenced formal parameter
+// warning C4127: conditional expression is constant
+// warning C4180: qualifier applied to function type has no meaning; ignored
+// warning C4197: 'purr': top-level volatile in cast is ignored
+// warning C4324: 'roar': structure was padded due to alignment specifier
+// warning C4455: literal suffix identifiers that do not start with an underscore are reserved
+// warning C4503: 'hum': decorated name length exceeded, name was truncated
+// warning C4522: 'woof' : multiple assignment operators specified
+// warning C4668: 'meow' is not defined as a preprocessor macro, replacing with '0' for '#if/#elif'
+// warning C4800: 'boo': forcing value to bool 'true' or 'false' (performance warning)
+// warning C4996: 'meow': was declared deprecated
+#define _LIBCUDACXX_MSVC_DISABLED_WARNINGS \
+    4100 \
+    4127 \
+    4180 \
+    4197 \
+    4296 \
+    4324 \
+    4455 \
+    4503 \
+    4522 \
+    4668 \
+    4800 \
+    4996 \
+    /**/
+
+#if _MSC_VER < 1900
+#error "MSVC versions prior to Visual Studio 2015 are not supported"
+#endif
+
+// MSVC implemented P0030R1 in 15.7; it is only available under C++17.
+#if _MSC_VER < 1914
+#define _LIBCUDACXX_NO_HOST_CPP17_HYPOT
+#endif
+
+#if _MSC_VER < 1920
+#define _LIBCUDACXX_HAS_NO_NOEXCEPT_SFINAE
+#define _LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO // This is conservative but correct.
+#define _LIBCUDACXX_HAS_NO_LOGICAL_METAFUNCTION_ALIASES
+#endif
+
+// MSVC exposed the __iso_volatile intrinsics for x86 beginning with 19.24.
+#if _MSC_VER < 1924
+  #define _LIBCUDACXX_MSVC_HAS_NO_ISO_INTRIN
+#endif
+
+#if _MSC_VER > 1924
+# define _LIBCUDACXX_IS_CONSTANT_EVALUATED(...) __builtin_is_constant_evaluated(__VA_ARGS__)
+#endif
+
+#define _LIBCUDACXX_UNDERLYING_TYPE(...) __underlying_type(__VA_ARGS__)
+#define _LIBCUDACXX_IS_CONSTRUCTIBLE(...) __is_constructible(__VA_ARGS__)
+#define _LIBCUDACXX_IS_DESTRUCTIBLE(...) __is_destructible(__VA_ARGS__)
+#define _LIBCUDACXX_IS_TRIVIALLY_CONSTRUCTIBLE(...) __is_trivially_constructible(__VA_ARGS__)
+#define _LIBCUDACXX_HAS_TRIVIAL_CONSTRUCTOR(...) __has_trivial_constructor(__VA_ARGS__)
+#define _LIBCUDACXX_IS_NOTHROW_CONSTRUCTIBLE(...) __is_nothrow_constructible(__VA_ARGS__)
+#define _LIBCUDACXX_HAS_NOTHROW_CONSTRUCTOR(...) __has_nothrow_constructor(__VA_ARGS__)
+#define _LIBCUDACXX_HAS_NOTHROW_COPY(...) __has_nothrow_copy(__VA_ARGS__)
+#define _LIBCUDACXX_HAS_NOTHROW_ASSIGN(...) __has_nothrow_assign(__VA_ARGS__)
+#define _LIBCUDACXX_IS_NOTHROW_ASSIGNABLE(...) __is_nothrow_assignable(__VA_ARGS__)
+#define _LIBCUDACXX_HAS_TRIVIAL_DESTRUCTOR(...)
__has_trivial_destructor(__VA_ARGS__) +#define _LIBCUDACXX_IS_NOTHROW_DESTRUCTIBLE(...) __is_nothrow_destructible(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIALLY_DESTRUCTIBLE(...) __is_trivially_destructible(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIALLY_ASSIGNABLE(...) __is_trivially_assignable(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIALLY_COPYABLE(...) __is_trivially_copyable(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIAL(...) __is_trivial(__VA_ARGS__) +#define _LIBCUDACXX_IS_LITERAL(...) __is_literal_type(__VA_ARGS__) +#define _LIBCUDACXX_IS_UNION(...) __is_union(__VA_ARGS__) +#define _LIBCUDACXX_IS_CLASS(...) __is_class(__VA_ARGS__) +#if _MSC_VER > 1914 // MSVC doesn't include __is_aggregate until 19.15 +#define _LIBCUDACXX_IS_AGGREGATE(...) __is_aggregate(__VA_ARGS__) +#endif // _MSC_VER > 1914 +#define _LIBCUDACXX_IS_POD(...) __is_pod(__VA_ARGS__) +#define _LIBCUDACXX_IS_STANDARD_LAYOUT(...) __is_standard_layout(__VA_ARGS__) +#define _LIBCUDACXX_IS_ENUM(...) __is_enum(__VA_ARGS__) +#define _LIBCUDACXX_IS_EMPTY(...) __is_empty(__VA_ARGS__) +#define _LIBCUDACXX_IS_POLYMORPHIC(...) __is_polymorphic(__VA_ARGS__) +#define _LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR(...) __has_virtual_destructor(__VA_ARGS__) +#define _LIBCUDACXX_IS_BASE_OF(...) __is_base_of(__VA_ARGS__) +#define _LIBCUDACXX_IS_FINAL(...) __is_final(__VA_ARGS__) +#define _LIBCUDACXX_IS_CONVERTIBLE_TO(...) __is_convertible_to(__VA_ARGS__) +#define _LIBCUDACXX_IS_ASSIGNABLE(...) __is_assignable(__VA_ARGS__) +#define __alignof__ __alignof +#define _LIBCUDACXX_NORETURN __declspec(noreturn) +#define _ALIGNAS(x) __declspec(align(x)) +#define _ALIGNAS_TYPE(x) alignas(x) + +#if _LIBCUDACXX_STD_VER < 14 +#define _LIBCUDACXX_HAS_NO_CXX14_CONSTEXPR +#define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES +#endif + +#define _LIBCUDACXX_WEAK + +#define _LIBCUDACXX_HAS_NO_ASAN + +#define _LIBCUDACXX_ALWAYS_INLINE __forceinline + +#define _LIBCUDACXX_HAS_NO_VECTOR_EXTENSION + +#define _LIBCUDACXX_HAS_REFERENCE_QUALIFIED_FUNCTIONS +#define _LIBCUDACXX_DISABLE_EXTENSION_WARNING + +#elif defined(_LIBCUDACXX_COMPILER_IBM) + +#define _ALIGNAS(x) __attribute__((__aligned__(x))) +#define _ALIGNAS_TYPE(x) __attribute__((__aligned__(_LIBCUDACXX_ALIGNOF(x)))) +#define _ATTRIBUTE(x) __attribute__((x)) +#define _LIBCUDACXX_NORETURN __attribute__((noreturn)) + +#define _LIBCUDACXX_HAS_NO_UNICODE_CHARS +#define _LIBCUDACXX_IS_BASE_OF(...) __is_base_of(__VA_ARGS__) +#define _LIBCUDACXX_IS_FINAL(...) __is_final(__VA_ARGS__) +#define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES + +#if defined(_AIX) +#define __MULTILOCALE_API +#endif + +#define _LIBCUDACXX_HAS_NO_ASAN + +#define _LIBCUDACXX_ALWAYS_INLINE __attribute__ ((__always_inline__)) + +#define _LIBCUDACXX_HAS_NO_VECTOR_EXTENSION + +#elif defined(_LIBCUDACXX_COMPILER_NVRTC) || defined(_LIBCUDACXX_COMPILER_PGI) + +#if !defined(__cpp_constexpr) || __cpp_constexpr < 201304L +#define _LIBCUDACXX_HAS_NO_CXX14_CONSTEXPR +#endif + +#if !defined(__cpp_variable_templates) || __cpp_variable_templates < 201304L +#define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES +#endif + +#define _ALIGNAS_TYPE(x) alignas(x) +#define _ALIGNAS(x) alignas(x) + +#define _LIBCUDACXX_UNDERLYING_TYPE(...) __underlying_type(__VA_ARGS__) +#define _LIBCUDACXX_IS_CONSTRUCTIBLE(...) __is_constructible(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIALLY_CONSTRUCTIBLE(...) __is_trivially_constructible(__VA_ARGS__) +#define _LIBCUDACXX_HAS_TRIVIAL_CONSTRUCTOR(...) __has_trivial_constructor(__VA_ARGS__) +#define _LIBCUDACXX_IS_NOTHROW_CONSTRUCTIBLE(...) 
__is_nothrow_constructible(__VA_ARGS__) +#define _LIBCUDACXX_HAS_NOTHROW_CONSTRUCTOR(...) __has_nothrow_constructor(__VA_ARGS__) +#define _LIBCUDACXX_HAS_NOTHROW_COPY(...) __has_nothrow_copy(__VA_ARGS__) +#define _LIBCUDACXX_HAS_NOTHROW_ASSIGN(...) __has_nothrow_assign(__VA_ARGS__) +#define _LIBCUDACXX_IS_NOTHROW_ASSIGNABLE(...) __is_nothrow_assignable(__VA_ARGS__) +#define _LIBCUDACXX_HAS_TRIVIAL_DESTRUCTOR(...) __has_trivial_destructor(__VA_ARGS__) +#define _LIBCUDACXX_IS_NOTHROW_DESTRUCTIBLE(...) __is_nothrow_destructible(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIALLY_DESTRUCTIBLE(...) __is_trivially_destructible(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIALLY_ASSIGNABLE(...) __is_trivially_assignable(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIALLY_COPYABLE(...) __is_trivially_copyable(__VA_ARGS__) +#define _LIBCUDACXX_IS_TRIVIAL(...) __is_trivial(__VA_ARGS__) +#define _LIBCUDACXX_IS_LITERAL(...) __is_literal_type(__VA_ARGS__) +#define _LIBCUDACXX_IS_UNION(...) __is_union(__VA_ARGS__) +#define _LIBCUDACXX_IS_CLASS(...) __is_class(__VA_ARGS__) +#define _LIBCUDACXX_IS_AGGREGATE(...) __is_aggregate(__VA_ARGS__) +#define _LIBCUDACXX_IS_POD(...) __is_pod(__VA_ARGS__) +#define _LIBCUDACXX_IS_STANDARD_LAYOUT(...) __is_standard_layout(__VA_ARGS__) +#define _LIBCUDACXX_IS_ENUM(...) __is_enum(__VA_ARGS__) +#define _LIBCUDACXX_IS_EMPTY(...) __is_empty(__VA_ARGS__) +#define _LIBCUDACXX_IS_POLYMORPHIC(...) __is_polymorphic(__VA_ARGS__) +#define _LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR(...) __has_virtual_destructor(__VA_ARGS__) +#define _LIBCUDACXX_IS_BASE_OF(...) __is_base_of(__VA_ARGS__) +#define _LIBCUDACXX_IS_FINAL(...) __is_final(__VA_ARGS__) +#define _LIBCUDACXX_IS_CONVERTIBLE_TO(...) __is_convertible_to(__VA_ARGS__) +#define _LIBCUDACXX_DISABLE_EXTENSION_WARNING + +#define _LIBCUDACXX_HAS_REFERENCE_QUALIFIED_FUNCTIONS + +#define _LIBCUDACXX_HAS_NO_ASAN + +#define _LIBCUDACXX_ALWAYS_INLINE __attribute__ ((__always_inline__)) +#define _LIBCUDACXX_NORETURN __attribute__ ((noreturn)) + +#define _GLIBCXX_INCLUDE_NEXT_C_HEADERS + +#endif // _LIBCUDACXX_COMPILER_[CLANG|GCC|MSVC|IBM|NVRTC] + +#if defined(_LIBCUDACXX_COMPILER_PGI) && !defined(__cuda_std__) +// Forcefully disable visibility controls when used as the standard library with NVC++. +// TODO: reevaluate. 
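+//
+// For illustration (not upstream code; `f` is a hypothetical function): with
+// visibility annotations disabled, a declaration written as
+//
+//     _LIBCUDACXX_FUNC_VIS void f();
+//
+// preprocesses to plain `void f();`, whereas an annotated ELF build would see
+//
+//     __attribute__((__visibility__("default"))) void f();
+//
+// The defines below pin the visibility machinery to those empty expansions
+// when this library serves as NVC++'s standard library.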
+#define _LIBCUDACXX_HIDE_FROM_ABI +#ifndef _LIBCUDACXX_DISABLE_EXTERN_TEMPLATE +#define _LIBCUDACXX_DISABLE_EXTERN_TEMPLATE +#endif +#define _LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS +#endif + +#if defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) + +#ifdef _DLL +# define _LIBCUDACXX_CRT_FUNC __declspec(dllimport) +#else +# define _LIBCUDACXX_CRT_FUNC +#endif + +#if defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_DLL_VIS +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS +# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS +# define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS +# define _LIBCUDACXX_EXPORTED_FROM_ABI +#elif defined(_LIBCUDACXX_BUILDING_LIBRARY) +# define _LIBCUDACXX_DLL_VIS __declspec(dllexport) +# if defined(__MINGW32__) +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS _LIBCUDACXX_DLL_VIS +# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS +# else +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS +# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS _LIBCUDACXX_DLL_VIS +# endif +# define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS _LIBCUDACXX_DLL_VIS +# define _LIBCUDACXX_EXPORTED_FROM_ABI __declspec(dllexport) +#else +# define _LIBCUDACXX_DLL_VIS __declspec(dllimport) +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS _LIBCUDACXX_DLL_VIS +# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS +# define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS +# define _LIBCUDACXX_EXPORTED_FROM_ABI __declspec(dllimport) +#endif + +#define _LIBCUDACXX_TYPE_VIS _LIBCUDACXX_DLL_VIS +#define _LIBCUDACXX_FUNC_VIS _LIBCUDACXX_DLL_VIS +#define _LIBCUDACXX_EXCEPTION_ABI _LIBCUDACXX_DLL_VIS +#define _LIBCUDACXX_HIDDEN +#define _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS +#define _LIBCUDACXX_TEMPLATE_VIS +#define _LIBCUDACXX_ENUM_VIS + +#endif // defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) + +#ifndef _LIBCUDACXX_HIDDEN +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_HIDDEN __attribute__ ((__visibility__("hidden"))) +# else +# define _LIBCUDACXX_HIDDEN +# endif +#endif + +#ifndef _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +// The inline should be removed once PR32114 is resolved +# define _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS inline _LIBCUDACXX_HIDDEN +# else +# define _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS +# endif +#endif + +#ifndef _LIBCUDACXX_FUNC_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_FUNC_VIS __attribute__ ((__visibility__("default"))) +# else +# define _LIBCUDACXX_FUNC_VIS +# endif +#endif + +#ifndef _LIBCUDACXX_TYPE_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_TYPE_VIS __attribute__ ((__visibility__("default"))) +# else +# define _LIBCUDACXX_TYPE_VIS +# endif +#endif + +#ifndef _LIBCUDACXX_TEMPLATE_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# if __has_attribute(__type_visibility__) +# define _LIBCUDACXX_TEMPLATE_VIS __attribute__ ((__type_visibility__("default"))) +# else +# define _LIBCUDACXX_TEMPLATE_VIS __attribute__ ((__visibility__("default"))) +# endif +# else +# define _LIBCUDACXX_TEMPLATE_VIS +# endif +#endif + +#ifndef _LIBCUDACXX_EXPORTED_FROM_ABI +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_EXPORTED_FROM_ABI __attribute__((__visibility__("default"))) +# else +# define _LIBCUDACXX_EXPORTED_FROM_ABI +# endif +#endif + +#ifndef _LIBCUDACXX_OVERRIDABLE_FUNC_VIS +#define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS _LIBCUDACXX_FUNC_VIS 
+#endif + +#ifndef _LIBCUDACXX_EXCEPTION_ABI +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_EXCEPTION_ABI __attribute__ ((__visibility__("default"))) +# else +# define _LIBCUDACXX_EXCEPTION_ABI +# endif +#endif + +#ifndef _LIBCUDACXX_ENUM_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) && __has_attribute(__type_visibility__) +# define _LIBCUDACXX_ENUM_VIS __attribute__ ((__type_visibility__("default"))) +# else +# define _LIBCUDACXX_ENUM_VIS +# endif +#endif + +#ifndef _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) && __has_attribute(__type_visibility__) +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS __attribute__ ((__visibility__("default"))) +# else +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS +# endif +#endif + +#ifndef _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS +#define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS +#endif + +#if __has_attribute(internal_linkage) +# define _LIBCUDACXX_INTERNAL_LINKAGE __attribute__ ((internal_linkage)) +#else +# define _LIBCUDACXX_INTERNAL_LINKAGE _LIBCUDACXX_ALWAYS_INLINE +#endif + +#if __has_attribute(exclude_from_explicit_instantiation) +# define _LIBCUDACXX_EXCLUDE_FROM_EXPLICIT_INSTANTIATION __attribute__ ((__exclude_from_explicit_instantiation__)) +#else + // Try to approximate the effect of exclude_from_explicit_instantiation + // (which is that entities are not assumed to be provided by explicit + // template instantiations in the dylib) by always inlining those entities. +# define _LIBCUDACXX_EXCLUDE_FROM_EXPLICIT_INSTANTIATION _LIBCUDACXX_ALWAYS_INLINE +#endif + +#ifndef _LIBCUDACXX_HIDE_FROM_ABI_PER_TU +# ifndef _LIBCUDACXX_HIDE_FROM_ABI_PER_TU_BY_DEFAULT +# define _LIBCUDACXX_HIDE_FROM_ABI_PER_TU 0 +# else +# define _LIBCUDACXX_HIDE_FROM_ABI_PER_TU 1 +# endif +#endif + +#ifndef _LIBCUDACXX_HAS_MERGED_TYPEINFO_NAMES_DEFAULT +# ifdef _LIBCUDACXX_OBJECT_FORMAT_COFF // Windows binaries can't merge typeinfos. +# define _LIBCUDACXX_HAS_MERGED_TYPEINFO_NAMES_DEFAULT 0 +#else +// TODO: This isn't strictly correct on ELF platforms due to llvm.org/PR37398 +// And we should consider defaulting to OFF. +# define _LIBCUDACXX_HAS_MERGED_TYPEINFO_NAMES_DEFAULT 1 +#endif +#endif + +#ifndef _LIBCUDACXX_HIDE_FROM_ABI +# if _LIBCUDACXX_HIDE_FROM_ABI_PER_TU +# define _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_HIDDEN _LIBCUDACXX_INTERNAL_LINKAGE +# else +# define _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_HIDDEN _LIBCUDACXX_EXCLUDE_FROM_EXPLICIT_INSTANTIATION +# endif +#endif + +#ifdef _LIBCUDACXX_BUILDING_LIBRARY +# if _LIBCUDACXX_ABI_VERSION > 1 +# define _LIBCUDACXX_HIDE_FROM_ABI_AFTER_V1 _LIBCUDACXX_HIDE_FROM_ABI +# else +# define _LIBCUDACXX_HIDE_FROM_ABI_AFTER_V1 +# endif +#else +# define _LIBCUDACXX_HIDE_FROM_ABI_AFTER_V1 _LIBCUDACXX_HIDE_FROM_ABI +#endif + +// Just so we can migrate to the new macros gradually. +#define _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_HIDE_FROM_ABI + +#define _LIBCUDACXX_EXECUTION_SPACE_SPECIFIER + +// Inline namespaces are available in Clang/GCC/MSVC regardless of C++ dialect. 
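+//
+// For illustration (the ABI namespace name `__4` here is an assumption; the
+// real one is supplied by _LIBCUDACXX_ABI_NAMESPACE), the macros defined next
+// expand roughly to
+//
+//     namespace std { inline namespace __4 {
+//         // library declarations
+//     } }
+//
+// Users still spell the types as std::tuple etc., while the mangled symbols
+// carry the inline namespace, so binaries built against different ABI
+// versions cannot be mixed silently.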
+#define _LIBCUDACXX_BEGIN_NAMESPACE_STD namespace std { inline namespace _LIBCUDACXX_ABI_NAMESPACE { +#define _LIBCUDACXX_END_NAMESPACE_STD } } +#define _CUDA_VSTD std::_LIBCUDACXX_ABI_NAMESPACE +_LIBCUDACXX_BEGIN_NAMESPACE_STD _LIBCUDACXX_END_NAMESPACE_STD + +#define _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION namespace std { +#define _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION } + +#if _LIBCUDACXX_STD_VER >= 17 +#define _LIBCUDACXX_BEGIN_NAMESPACE_FILESYSTEM \ + _LIBCUDACXX_BEGIN_NAMESPACE_STD inline namespace __fs { namespace filesystem { +#else +#define _LIBCUDACXX_BEGIN_NAMESPACE_FILESYSTEM \ + _LIBCUDACXX_BEGIN_NAMESPACE_STD namespace __fs { namespace filesystem { +#endif + +#define _LIBCUDACXX_END_NAMESPACE_FILESYSTEM \ + _LIBCUDACXX_END_NAMESPACE_STD } } + +#define _CUDA_VSTD_FS _CUDA_VSTD::__fs::filesystem + +#ifndef _LIBCUDACXX_PREFERRED_OVERLOAD +# if __has_attribute(__enable_if__) +# define _LIBCUDACXX_PREFERRED_OVERLOAD __attribute__ ((__enable_if__(true, ""))) +# endif +#endif + +#ifndef _LIBCUDACXX_HAS_NO_NOEXCEPT +# define _NOEXCEPT noexcept +# define _NOEXCEPT_(x) noexcept(x) +#else +# define _NOEXCEPT throw() +# define _NOEXCEPT_(x) +#endif + +#ifdef _LIBCUDACXX_HAS_NO_UNICODE_CHARS +typedef unsigned short char16_t; +typedef unsigned int char32_t; +#endif // _LIBCUDACXX_HAS_NO_UNICODE_CHARS + +#ifndef __SIZEOF_INT128__ +#define _LIBCUDACXX_HAS_NO_INT128 +#endif + +#ifdef _LIBCUDACXX_CXX03_LANG +# define static_assert(...) _Static_assert(__VA_ARGS__) +# define decltype(...) __decltype(__VA_ARGS__) +#endif // _LIBCUDACXX_CXX03_LANG + +#ifdef _LIBCUDACXX_CXX03_LANG +# define _LIBCUDACXX_CONSTEXPR +#else +# define _LIBCUDACXX_CONSTEXPR constexpr +#endif + +#ifdef _LIBCUDACXX_CXX03_LANG +# define _LIBCUDACXX_DEFAULT {} +#else +# define _LIBCUDACXX_DEFAULT = default; +#endif + +#ifdef _LIBCUDACXX_CXX03_LANG +# define _LIBCUDACXX_EQUAL_DELETE +#else +# define _LIBCUDACXX_EQUAL_DELETE = delete +#endif + +#ifdef __GNUC__ +# define _LIBCUDACXX_NOALIAS __attribute__((__malloc__)) +#else +# define _LIBCUDACXX_NOALIAS +#endif + +#if __has_feature(cxx_explicit_conversions) || defined(__IBMCPP__) || \ + (!defined(_LIBCUDACXX_CXX03_LANG) && defined(__GNUC__)) // All supported GCC versions +# define _LIBCUDACXX_EXPLICIT explicit +#else +# define _LIBCUDACXX_EXPLICIT +#endif + +#if !__has_builtin(__builtin_operator_new) || !__has_builtin(__builtin_operator_delete) +#define _LIBCUDACXX_HAS_NO_BUILTIN_OPERATOR_NEW_DELETE +#endif + +#ifdef _LIBCUDACXX_HAS_NO_STRONG_ENUMS +# define _LIBCUDACXX_DECLARE_STRONG_ENUM(x) struct _LIBCUDACXX_TYPE_VIS x { enum __lx +# define _LIBCUDACXX_DECLARE_STRONG_ENUM_EPILOG(x) \ + __lx __v_; \ + _LIBCUDACXX_INLINE_VISIBILITY x(__lx __v) : __v_(__v) {} \ + _LIBCUDACXX_INLINE_VISIBILITY explicit x(int __v) : __v_(static_cast<__lx>(__v)) {} \ + _LIBCUDACXX_INLINE_VISIBILITY operator int() const {return __v_;} \ + }; +#else // _LIBCUDACXX_HAS_NO_STRONG_ENUMS +# define _LIBCUDACXX_DECLARE_STRONG_ENUM(x) enum class _LIBCUDACXX_ENUM_VIS x +# define _LIBCUDACXX_DECLARE_STRONG_ENUM_EPILOG(x) +#endif // _LIBCUDACXX_HAS_NO_STRONG_ENUMS + +#ifdef _LIBCUDACXX_DEBUG +# if _LIBCUDACXX_DEBUG == 0 +# define _LIBCUDACXX_DEBUG_LEVEL 1 +# elif _LIBCUDACXX_DEBUG == 1 +# define _LIBCUDACXX_DEBUG_LEVEL 2 +# else +# error Supported values for _LIBCUDACXX_DEBUG are 0 and 1 +# endif +# if !defined(_LIBCUDACXX_BUILDING_LIBRARY) +# define _LIBCUDACXX_EXTERN_TEMPLATE(...) +# endif +#endif + +#ifdef _LIBCUDACXX_DISABLE_EXTERN_TEMPLATE +#define _LIBCUDACXX_EXTERN_TEMPLATE(...) 
+#define _LIBCUDACXX_EXTERN_TEMPLATE2(...) +#endif + +#ifndef _LIBCUDACXX_EXTERN_TEMPLATE +#define _LIBCUDACXX_EXTERN_TEMPLATE(...) extern template __VA_ARGS__; +#endif + +#ifndef _LIBCUDACXX_EXTERN_TEMPLATE2 +#define _LIBCUDACXX_EXTERN_TEMPLATE2(...) extern template __VA_ARGS__; +#endif + +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(_LIBCUDACXX_MSVCRT_LIKE) || \ + defined(__sun__) || defined(__NetBSD__) || defined(__CloudABI__) +#define _LIBCUDACXX_LOCALE__L_EXTENSIONS 1 +#endif + +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) +// Most unix variants have catopen. These are the specific ones that don't. +# if !defined(__BIONIC__) && !defined(_NEWLIB_VERSION) +# define _LIBCUDACXX_HAS_CATOPEN 1 +# endif +#endif + +#ifdef __FreeBSD__ +#define _DECLARE_C99_LDBL_MATH 1 +#endif + +#if defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_NO_VCRUNTIME) +# define _LIBCUDACXX_DEFER_NEW_TO_VCRUNTIME +#endif + +// If we are getting operator new from the MSVC CRT, then allocation overloads +// for align_val_t were added in 19.12, aka VS 2017 version 15.3. +#if defined(_LIBCUDACXX_MSVCRT) && defined(_MSC_VER) && _MSC_VER < 1912 +# define _LIBCUDACXX_HAS_NO_LIBRARY_ALIGNED_ALLOCATION +#elif defined(_LIBCUDACXX_ABI_VCRUNTIME) && !defined(__cpp_aligned_new) + // We're deferring to Microsoft's STL to provide aligned new et al. We don't + // have it unless the language feature test macro is defined. +# define _LIBCUDACXX_HAS_NO_LIBRARY_ALIGNED_ALLOCATION +#endif + +#if defined(__APPLE__) +# if !defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \ + defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) +# define __MAC_OS_X_VERSION_MIN_REQUIRED __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ +# endif +#endif // defined(__APPLE__) + +#if !defined(_LIBCUDACXX_HAS_NO_ALIGNED_ALLOCATION) && \ + (defined(_LIBCUDACXX_HAS_NO_LIBRARY_ALIGNED_ALLOCATION) || \ + (!defined(__cpp_aligned_new) || __cpp_aligned_new < 201606)) +# define _LIBCUDACXX_HAS_NO_ALIGNED_ALLOCATION +#endif + +#if defined(__APPLE__) || defined(__FreeBSD__) +#define _LIBCUDACXX_HAS_DEFAULTRUNELOCALE +#endif + +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__sun__) +#define _LIBCUDACXX_WCTYPE_IS_MASK +#endif + +#if _LIBCUDACXX_STD_VER <= 17 || !defined(__cpp_char8_t) +#define _LIBCUDACXX_NO_HAS_CHAR8_T +#endif + +// Deprecation macros. +// +// Deprecations warnings are always enabled, except when users explicitly opt-out +// by defining _LIBCUDACXX_DISABLE_DEPRECATION_WARNINGS. 
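+//
+// For illustration (hypothetical declaration, not part of this header): an
+// entity deprecated since C++14 is marked with the macros defined below as
+//
+//     _LIBCUDACXX_DEPRECATED_IN_CXX14 void old_api();
+//
+// which expands to __attribute__((deprecated)) or [[deprecated]] when
+// compiling as C++14 or later, and to nothing in earlier dialects or when
+// the opt-out macro named above is defined.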
+#if !defined(_LIBCUDACXX_DISABLE_DEPRECATION_WARNINGS) +# if __has_attribute(deprecated) +# define _LIBCUDACXX_DEPRECATED __attribute__ ((deprecated)) +# elif _LIBCUDACXX_STD_VER > 11 +# define _LIBCUDACXX_DEPRECATED [[deprecated]] +# else +# define _LIBCUDACXX_DEPRECATED +# endif +#else +# define _LIBCUDACXX_DEPRECATED +#endif + +#if !defined(_LIBCUDACXX_CXX03_LANG) +# define _LIBCUDACXX_DEPRECATED_IN_CXX11 _LIBCUDACXX_DEPRECATED +#else +# define _LIBCUDACXX_DEPRECATED_IN_CXX11 +#endif + +#if _LIBCUDACXX_STD_VER >= 14 +# define _LIBCUDACXX_DEPRECATED_IN_CXX14 _LIBCUDACXX_DEPRECATED +#else +# define _LIBCUDACXX_DEPRECATED_IN_CXX14 +#endif + +#if _LIBCUDACXX_STD_VER >= 17 +# define _LIBCUDACXX_DEPRECATED_IN_CXX17 _LIBCUDACXX_DEPRECATED +#else +# define _LIBCUDACXX_DEPRECATED_IN_CXX17 +#endif + +#if _LIBCUDACXX_STD_VER <= 11 +# define _LIBCUDACXX_EXPLICIT_AFTER_CXX11 +#else +# define _LIBCUDACXX_EXPLICIT_AFTER_CXX11 explicit +#endif + +#if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_CXX14_CONSTEXPR) +# define _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 constexpr +#else +# define _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 +#endif + +#if _LIBCUDACXX_STD_VER > 14 && !defined(_LIBCUDACXX_HAS_NO_CXX14_CONSTEXPR) +# define _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 constexpr +#else +# define _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +#endif + +#if _LIBCUDACXX_STD_VER > 17 && !defined(_LIBCUDACXX_HAS_NO_CXX14_CONSTEXPR) +# define _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 constexpr +#else +# define _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 +#endif + +// Macros to enter and leave a state where deprecation warnings are suppressed. +#if defined(_LIBCUDACXX_COMPILER_CLANG) || defined(_LIBCUDACXX_COMPILER_GCC) +# define _LIBCUDACXX_SUPPRESS_DEPRECATED_PUSH \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated\"") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +# define _LIBCUDACXX_SUPPRESS_DEPRECATED_POP \ + _Pragma("GCC diagnostic pop") +#else +# define _LIBCUDACXX_SUPPRESS_DEPRECATED_PUSH +# define _LIBCUDACXX_SUPPRESS_DEPRECATED_POP +#endif + +// The _LIBCUDACXX_NODISCARD_ATTRIBUTE should only be used to define other +// NODISCARD macros to the correct attribute. +#if __has_cpp_attribute(nodiscard) || (defined(_LIBCUDACXX_COMPILER_MSVC) && _LIBCUDACXX_STD_VER > 14) +# define _LIBCUDACXX_NODISCARD_ATTRIBUTE [[nodiscard]] +#elif defined(_LIBCUDACXX_COMPILER_CLANG) && !defined(_LIBCUDACXX_CXX03_LANG) +# define _LIBCUDACXX_NODISCARD_ATTRIBUTE [[clang::warn_unused_result]] +#else +// We can't use GCC's [[gnu::warn_unused_result]] and +// __attribute__((warn_unused_result)), because GCC does not silence them via +// (void) cast. +# define _LIBCUDACXX_NODISCARD_ATTRIBUTE +#endif + +// _LIBCUDACXX_NODISCARD_EXT may be used to apply [[nodiscard]] to entities not +// specified as such as an extension. 
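+//
+// For illustration (hypothetical member function):
+//
+//     _LIBCUDACXX_NODISCARD_EXT bool empty() const;
+//
+// warns on a discarded return value only when the user opts in by defining
+// _LIBCUDACXX_ENABLE_NODISCARD, since applying [[nodiscard]] here is a
+// conforming extension rather than a requirement of the standard; the macros
+// below implement that opt-in.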
+#if defined(_LIBCUDACXX_ENABLE_NODISCARD) && !defined(_LIBCUDACXX_DISABLE_NODISCARD_EXT)
+# define _LIBCUDACXX_NODISCARD_EXT _LIBCUDACXX_NODISCARD_ATTRIBUTE
+#else
+# define _LIBCUDACXX_NODISCARD_EXT
+#endif
+
+#if !defined(_LIBCUDACXX_DISABLE_NODISCARD_AFTER_CXX17) && \
+    (_LIBCUDACXX_STD_VER > 17 || defined(_LIBCUDACXX_ENABLE_NODISCARD))
+# define _LIBCUDACXX_NODISCARD_AFTER_CXX17 _LIBCUDACXX_NODISCARD_ATTRIBUTE
+#else
+# define _LIBCUDACXX_NODISCARD_AFTER_CXX17
+#endif
+
+#if _LIBCUDACXX_STD_VER > 14 && defined(__cpp_inline_variables) && (__cpp_inline_variables >= 201606L)
+# define _LIBCUDACXX_INLINE_VAR inline
+#else
+# define _LIBCUDACXX_INLINE_VAR
+#endif
+
+#ifdef _LIBCUDACXX_HAS_NO_RVALUE_REFERENCES
+# define _LIBCUDACXX_EXPLICIT_MOVE(x) _CUDA_VSTD::move(x)
+#else
+# define _LIBCUDACXX_EXPLICIT_MOVE(x) (x)
+#endif
+
+#ifndef _LIBCUDACXX_CONSTEXPR_IF_NODEBUG
+#if defined(_LIBCUDACXX_DEBUG) || defined(_LIBCUDACXX_HAS_NO_CXX14_CONSTEXPR)
+#define _LIBCUDACXX_CONSTEXPR_IF_NODEBUG
+#else
+#define _LIBCUDACXX_CONSTEXPR_IF_NODEBUG constexpr
+#endif
+#endif
+
+#if __has_attribute(no_destroy)
+# define _LIBCUDACXX_NO_DESTROY __attribute__((__no_destroy__))
+#else
+# define _LIBCUDACXX_NO_DESTROY
+#endif
+
+#ifndef _LIBCUDACXX_HAS_NO_ASAN
+extern "C" _LIBCUDACXX_FUNC_VIS void __sanitizer_annotate_contiguous_container(
+    const void *, const void *, const void *, const void *);
+#endif
+
+// Try to find out if RTTI is disabled.
+// g++ and cl.exe have RTTI on by default and define a macro when it is.
+// g++ only defines the macro in 4.3.2 and onwards.
+#if !defined(_LIBCUDACXX_NO_RTTI)
+# if defined(__GNUC__) && \
+     ((__GNUC__ >= 5) || \
+      (__GNUC__ == 4 && (__GNUC_MINOR__ >= 3 || __GNUC_PATCHLEVEL__ >= 2))) && \
+     !defined(__GXX_RTTI)
+#  define _LIBCUDACXX_NO_RTTI
+# elif defined(_LIBCUDACXX_COMPILER_MSVC) && !defined(_CPPRTTI)
+#  define _LIBCUDACXX_NO_RTTI
+# endif
+#endif
+
+#ifndef _LIBCUDACXX_WEAK
+#define _LIBCUDACXX_WEAK __attribute__((__weak__))
+#endif
+
+// Thread API
+#if !defined(_LIBCUDACXX_HAS_NO_THREADS) && \
+    !defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) && \
+    !defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) && \
+    !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL)
+# if defined(__FreeBSD__) || \
+     defined(__Fuchsia__) || \
+     defined(__wasi__) || \
+     defined(__NetBSD__) || \
+     defined(__linux__) || \
+     defined(__GNU__) || \
+     defined(__APPLE__) || \
+     defined(__CloudABI__) || \
+     defined(__sun__) || \
+     (defined(__MINGW32__) && __has_include(<pthread.h>))
+#  define _LIBCUDACXX_HAS_THREAD_API_PTHREAD
+# elif defined(_LIBCUDACXX_WIN32API)
+#  define _LIBCUDACXX_HAS_THREAD_API_WIN32
+# else
+#  error "No thread API"
+# endif // _LIBCUDACXX_HAS_THREAD_API
+#endif // _LIBCUDACXX_HAS_NO_THREADS
+
+#if defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD)
+#if defined(__ANDROID__) && __ANDROID_API__ >= 30
+#define _LIBCUDACXX_HAS_COND_CLOCKWAIT
+#elif defined(_LIBCUDACXX_GLIBC_PREREQ)
+#if _LIBCUDACXX_GLIBC_PREREQ(2, 30)
+#define _LIBCUDACXX_HAS_COND_CLOCKWAIT
+#endif
+#endif
+#endif
+
+#if defined(_LIBCUDACXX_HAS_NO_THREADS) && defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD)
+#error _LIBCUDACXX_HAS_THREAD_API_PTHREAD may only be defined when \
+       _LIBCUDACXX_HAS_NO_THREADS is not defined.
+#endif
+
+#if defined(_LIBCUDACXX_HAS_NO_THREADS) && defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL)
+#error _LIBCUDACXX_HAS_THREAD_API_EXTERNAL may not be defined when \
+       _LIBCUDACXX_HAS_NO_THREADS is defined.
+#endif + +#if defined(__STDCPP_THREADS__) && defined(_LIBCUDACXX_HAS_NO_THREADS) +#error _LIBCUDACXX_HAS_NO_THREADS cannot be set when __STDCPP_THREADS__ is set. +#endif + +#if !defined(_LIBCUDACXX_HAS_NO_THREADS) && !defined(__STDCPP_THREADS__) +#define __STDCPP_THREADS__ 1 +#endif + +// The glibc and Bionic implementation of pthreads implements +// pthread_mutex_destroy as nop for regular mutexes. Additionally, Win32 +// mutexes have no destroy mechanism. +// +// This optimization can't be performed on Apple platforms, where +// pthread_mutex_destroy can allow the kernel to release resources. +// See https://llvm.org/D64298 for details. +// +// TODO(EricWF): Enable this optimization on Bionic after speaking to their +// respective stakeholders. +#if (defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) && defined(__GLIBC__)) \ + || defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) +# define _LIBCUDACXX_HAS_TRIVIAL_MUTEX_DESTRUCTION +#endif + +// Destroying a condvar is a nop on Windows. +// +// This optimization can't be performed on Apple platforms, where +// pthread_cond_destroy can allow the kernel to release resources. +// See https://llvm.org/D64298 for details. +// +// TODO(EricWF): This is potentially true for some pthread implementations +// as well. +#if defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) +# define _LIBCUDACXX_HAS_TRIVIAL_CONDVAR_DESTRUCTION +#endif + +// Systems that use capability-based security (FreeBSD with Capsicum, +// Nuxi CloudABI) may only provide local filesystem access (using *at()). +// Functions like open(), rename(), unlink() and stat() should not be +// used, as they attempt to access the global filesystem namespace. +#ifdef __CloudABI__ +#define _LIBCUDACXX_HAS_NO_GLOBAL_FILESYSTEM_NAMESPACE +#endif + +// CloudABI is intended for running networked services. Processes do not +// have standard input and output channels. +#ifdef __CloudABI__ +#define _LIBCUDACXX_HAS_NO_STDIN +#define _LIBCUDACXX_HAS_NO_STDOUT +#endif + +// Some systems do not provide gets() in their C library, for security reasons. +#ifndef _LIBCUDACXX_C_HAS_NO_GETS +# if defined(_LIBCUDACXX_MSVCRT) || (defined(__FreeBSD__) && __FreeBSD__ >= 13) +# define _LIBCUDACXX_C_HAS_NO_GETS +# endif +#endif + +#if defined(__BIONIC__) || defined(__CloudABI__) || \ + defined(__Fuchsia__) || defined(__wasi__) || defined(_LIBCUDACXX_HAS_MUSL_LIBC) +#define _LIBCUDACXX_PROVIDES_DEFAULT_RUNE_TABLE +#endif + +// Thread-unsafe functions such as strtok() and localtime() +// are not available. +#ifdef __CloudABI__ +#define _LIBCUDACXX_HAS_NO_THREAD_UNSAFE_C_FUNCTIONS +#endif + +// TODO: Support C11 Atomics? 
+// #if __has_feature(cxx_atomic) || __has_extension(c_atomic) || __has_keyword(_Atomic) +// # define _LIBCUDACXX_HAS_C_ATOMIC_IMP +#if defined(_LIBCUDACXX_COMPILER_CLANG) +# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP +#elif defined(_LIBCUDACXX_COMPILER_GCC) || defined(_LIBCUDACXX_COMPILER_PGI) +# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP +#elif defined(_LIBCUDACXX_COMPILER_PGI) +# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP +#elif defined(_LIBCUDACXX_COMPILER_MSVC) +# define _LIBCUDACXX_HAS_MSVC_ATOMIC_IMPL +#endif + +// CUDA Atomics supersede host atomics in order to insert the host/device dispatch layer +#if defined(_LIBCUDACXX_COMPILER_NVCC) || defined(_LIBCUDACXX_COMPILER_NVRTC) || defined(_LIBCUDACXX_COMPILER_PGI) +# define _LIBCUDACXX_HAS_CUDA_ATOMIC_IMPL +#endif + +#if (!defined(_LIBCUDACXX_HAS_C_ATOMIC_IMP) && \ + !defined(_LIBCUDACXX_HAS_GCC_ATOMIC_IMP) && \ + !defined(_LIBCUDACXX_HAS_EXTERNAL_ATOMIC_IMP)) \ + || defined(_LIBCUDACXX_HAS_NO_THREADS) +# define _LIBCUDACXX_HAS_NO_ATOMIC_HEADER +#else +# ifndef _LIBCUDACXX_ATOMIC_FLAG_TYPE +# define _LIBCUDACXX_ATOMIC_FLAG_TYPE bool +# endif +# ifdef _LIBCUDACXX_FREESTANDING +# define _LIBCUDACXX_ATOMIC_ONLY_USE_BUILTINS +# endif +#endif + +#ifndef _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK +#define _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK +#endif + +#if defined(_LIBCUDACXX_ENABLE_THREAD_SAFETY_ANNOTATIONS) +# if defined(__clang__) && __has_attribute(acquire_capability) +// Work around the attribute handling in clang. When both __declspec and +// __attribute__ are present, the processing goes awry preventing the definition +// of the types. +# if !defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) +# define _LIBCUDACXX_HAS_THREAD_SAFETY_ANNOTATIONS +# endif +# endif +#endif + +#if __has_attribute(require_constant_initialization) +# define _LIBCUDACXX_SAFE_STATIC __attribute__((__require_constant_initialization__)) +#else +# define _LIBCUDACXX_SAFE_STATIC +#endif + +#if !defined(_LIBCUDACXX_HAS_NO_OFF_T_FUNCTIONS) +# if defined(_LIBCUDACXX_MSVCRT) || defined(_NEWLIB_VERSION) +# define _LIBCUDACXX_HAS_NO_OFF_T_FUNCTIONS +# endif +#endif + +#if __has_attribute(diagnose_if) && !defined(_LIBCUDACXX_DISABLE_ADDITIONAL_DIAGNOSTICS) +# define _LIBCUDACXX_DIAGNOSE_WARNING(...) \ + __attribute__((diagnose_if(__VA_ARGS__, "warning"))) +# define _LIBCUDACXX_DIAGNOSE_ERROR(...) \ + __attribute__((diagnose_if(__VA_ARGS__, "error"))) +#else +# define _LIBCUDACXX_DIAGNOSE_WARNING(...) +# define _LIBCUDACXX_DIAGNOSE_ERROR(...) 
+#endif
+
+// Use a function-like macro to imply that it must be followed by a semicolon.
+#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
+# define _LIBCUDACXX_FALLTHROUGH() [[fallthrough]]
+#elif defined(__CUDACC_RTC__)
+# define _LIBCUDACXX_FALLTHROUGH() ((void)0)
+#elif __has_cpp_attribute(clang::fallthrough)
+# define _LIBCUDACXX_FALLTHROUGH() [[clang::fallthrough]]
+#elif defined(_LIBCUDACXX_COMPILER_PGI)
+# define _LIBCUDACXX_FALLTHROUGH()
+#elif __has_attribute(fallthrough) || _GNUC_VER >= 700
+# define _LIBCUDACXX_FALLTHROUGH() __attribute__((__fallthrough__))
+#else
+# define _LIBCUDACXX_FALLTHROUGH() ((void)0)
+#endif
+
+#if __has_attribute(__nodebug__)
+#define _LIBCUDACXX_NODEBUG __attribute__((__nodebug__))
+#else
+#define _LIBCUDACXX_NODEBUG
+#endif
+
+#ifndef _LIBCUDACXX_NODEBUG_TYPE
+#if __has_attribute(__nodebug__) && \
+    (defined(_LIBCUDACXX_CLANG_VER) && _LIBCUDACXX_CLANG_VER >= 1210)
+#define _LIBCUDACXX_NODEBUG_TYPE __attribute__((nodebug))
+#else
+#define _LIBCUDACXX_NODEBUG_TYPE
+#endif
+#endif // !defined(_LIBCUDACXX_NODEBUG_TYPE)
+
+#if defined(_LIBCUDACXX_ABI_MICROSOFT) && \
+    (defined(_LIBCUDACXX_COMPILER_MSVC) || __has_declspec_attribute(empty_bases))
+# define _LIBCUDACXX_DECLSPEC_EMPTY_BASES __declspec(empty_bases)
+#else
+# define _LIBCUDACXX_DECLSPEC_EMPTY_BASES
+#endif
+
+#if defined(_LIBCUDACXX_ENABLE_CXX17_REMOVED_FEATURES)
+#define _LIBCUDACXX_ENABLE_CXX17_REMOVED_AUTO_PTR
+#define _LIBCUDACXX_ENABLE_CXX17_REMOVED_UNEXPECTED_FUNCTIONS
+#define _LIBCUDACXX_ENABLE_CXX17_REMOVED_RANDOM_SHUFFLE
+#define _LIBCUDACXX_ENABLE_CXX17_REMOVED_BINDERS
+#endif // _LIBCUDACXX_ENABLE_CXX17_REMOVED_FEATURES
+
+#if !defined(__cpp_deduction_guides) || __cpp_deduction_guides < 201611
+#define _LIBCUDACXX_HAS_NO_DEDUCTION_GUIDES
+#endif
+
+#if !defined(__cpp_coroutines) || __cpp_coroutines < 201703L
+#define _LIBCUDACXX_HAS_NO_COROUTINES
+#endif
+
+// FIXME: Correct this macro when either (A) a feature test macro for the
+// spaceship operator is provided, or (B) a compiler provides a complete
+// implementation.
+#define _LIBCUDACXX_HAS_NO_SPACESHIP_OPERATOR
+
+// Decide whether to use availability macros.
+#if !defined(_LIBCUDACXX_BUILDING_LIBRARY) && \
+    !defined(_LIBCUDACXX_DISABLE_AVAILABILITY) && \
+    __has_feature(attribute_availability_with_strict) && \
+    __has_feature(attribute_availability_in_templates) && \
+    __has_extension(pragma_clang_attribute_external_declaration)
+# ifdef __APPLE__
+#  define _LIBCUDACXX_USE_AVAILABILITY_APPLE
+# endif
+#endif
+
+// Define availability macros.
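+//
+// For illustration (a sketch, not upstream code): on Apple targets each macro
+// below expands to strict availability attributes, so a class that needs
+// dylib support is declared roughly as
+//
+//     class _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS bad_optional_access;
+//
+// and building against a deployment target older than the release that first
+// shipped the symbol is diagnosed at compile time instead of failing at load
+// time.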
+#if defined(_LIBCUDACXX_USE_AVAILABILITY_APPLE) +# define _LIBCUDACXX_AVAILABILITY_SHARED_MUTEX \ + __attribute__((availability(macosx,strict,introduced=10.12))) \ + __attribute__((availability(ios,strict,introduced=10.0))) \ + __attribute__((availability(tvos,strict,introduced=10.0))) \ + __attribute__((availability(watchos,strict,introduced=3.0))) +# define _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS \ + __attribute__((availability(macosx,strict,introduced=10.14))) \ + __attribute__((availability(ios,strict,introduced=12.0))) \ + __attribute__((availability(tvos,strict,introduced=12.0))) \ + __attribute__((availability(watchos,strict,introduced=5.0))) +# define _LIBCUDACXX_AVAILABILITY_BAD_VARIANT_ACCESS \ + _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS +# define _LIBCUDACXX_AVAILABILITY_BAD_ANY_CAST \ + _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS +# define _LIBCUDACXX_AVAILABILITY_UNCAUGHT_EXCEPTIONS \ + __attribute__((availability(macosx,strict,introduced=10.12))) \ + __attribute__((availability(ios,strict,introduced=10.0))) \ + __attribute__((availability(tvos,strict,introduced=10.0))) \ + __attribute__((availability(watchos,strict,introduced=3.0))) +# define _LIBCUDACXX_AVAILABILITY_SIZED_NEW_DELETE \ + __attribute__((availability(macosx,strict,introduced=10.12))) \ + __attribute__((availability(ios,strict,introduced=10.0))) \ + __attribute__((availability(tvos,strict,introduced=10.0))) \ + __attribute__((availability(watchos,strict,introduced=3.0))) +# define _LIBCUDACXX_AVAILABILITY_FUTURE_ERROR \ + __attribute__((availability(ios,strict,introduced=6.0))) +# define _LIBCUDACXX_AVAILABILITY_TYPEINFO_VTABLE \ + __attribute__((availability(macosx,strict,introduced=10.9))) \ + __attribute__((availability(ios,strict,introduced=7.0))) +# define _LIBCUDACXX_AVAILABILITY_LOCALE_CATEGORY \ + __attribute__((availability(macosx,strict,introduced=10.9))) \ + __attribute__((availability(ios,strict,introduced=7.0))) +# define _LIBCUDACXX_AVAILABILITY_ATOMIC_SHARED_PTR \ + __attribute__((availability(macosx,strict,introduced=10.9))) \ + __attribute__((availability(ios,strict,introduced=7.0))) +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM \ + __attribute__((availability(macosx,strict,introduced=10.15))) \ + __attribute__((availability(ios,strict,introduced=13.0))) \ + __attribute__((availability(tvos,strict,introduced=13.0))) \ + __attribute__((availability(watchos,strict,introduced=6.0))) +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_PUSH \ + _Pragma("clang attribute push(__attribute__((availability(macosx,strict,introduced=10.15))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(ios,strict,introduced=13.0))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(tvos,strict,introduced=13.0))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(watchos,strict,introduced=6.0))), apply_to=any(function,record))") +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_POP \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") +#else +# define _LIBCUDACXX_AVAILABILITY_SHARED_MUTEX +# define _LIBCUDACXX_AVAILABILITY_BAD_VARIANT_ACCESS +# define _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS +# define _LIBCUDACXX_AVAILABILITY_BAD_ANY_CAST +# define _LIBCUDACXX_AVAILABILITY_UNCAUGHT_EXCEPTIONS +# define _LIBCUDACXX_AVAILABILITY_SIZED_NEW_DELETE +# define _LIBCUDACXX_AVAILABILITY_FUTURE_ERROR +# 
define _LIBCUDACXX_AVAILABILITY_TYPEINFO_VTABLE +# define _LIBCUDACXX_AVAILABILITY_LOCALE_CATEGORY +# define _LIBCUDACXX_AVAILABILITY_ATOMIC_SHARED_PTR +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_PUSH +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_POP +#endif + +// Define availability that depends on _LIBCUDACXX_NO_EXCEPTIONS. +#ifdef _LIBCUDACXX_NO_EXCEPTIONS +# define _LIBCUDACXX_AVAILABILITY_FUTURE +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_ANY_CAST +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_VARIANT_ACCESS +#else +# define _LIBCUDACXX_AVAILABILITY_FUTURE _LIBCUDACXX_AVAILABILITY_FUTURE_ERROR +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_ANY_CAST _LIBCUDACXX_AVAILABILITY_BAD_ANY_CAST +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_VARIANT_ACCESS _LIBCUDACXX_AVAILABILITY_BAD_VARIANT_ACCESS +#endif + +// The stream API was dropped and re-added in the dylib shipped on macOS +// and iOS. We can only assume the dylib to provide these definitions for +// macosx >= 10.9 and ios >= 7.0. Otherwise, the definitions are available +// from the headers, but not from the dylib. Explicit instantiation +// declarations for streams exist conditionally to this; if we provide +// an explicit instantiation declaration and we try to deploy to a dylib +// that does not provide those symbols, we'll get a load-time error. +#if !defined(_LIBCUDACXX_BUILDING_LIBRARY) && \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1090) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 70000)) +# define _LIBCUDACXX_DO_NOT_ASSUME_STREAMS_EXPLICIT_INSTANTIATION_IN_DYLIB +#endif + +#if defined(_LIBCUDACXX_COMPILER_IBM) +#define _LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO +#endif + +#if defined(_LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO) +# define _LIBCUDACXX_PUSH_MACROS +# define _LIBCUDACXX_POP_MACROS +#else + // Don't warn about macro conflicts when we can restore them at the + // end of the header. +# ifndef _LIBCUDACXX_DISABLE_MACRO_CONFLICT_WARNINGS +# define _LIBCUDACXX_DISABLE_MACRO_CONFLICT_WARNINGS +# endif +# if defined(_LIBCUDACXX_COMPILER_MSVC) +# define _LIBCUDACXX_PUSH_MACROS \ + __pragma(push_macro("min")) \ + __pragma(push_macro("max")) +# define _LIBCUDACXX_POP_MACROS \ + __pragma(pop_macro("min")) \ + __pragma(pop_macro("max")) +# else +# define _LIBCUDACXX_PUSH_MACROS \ + _Pragma("push_macro(\"min\")") \ + _Pragma("push_macro(\"max\")") +# define _LIBCUDACXX_POP_MACROS \ + _Pragma("pop_macro(\"min\")") \ + _Pragma("pop_macro(\"max\")") +# endif +#endif // defined(_LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO) + +#ifndef _LIBCUDACXX_NO_AUTO_LINK +# if defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_BUILDING_LIBRARY) +# if defined(_DLL) +# pragma comment(lib, "c++.lib") +# else +# pragma comment(lib, "libc++.lib") +# endif +# endif // defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_BUILDING_LIBRARY) +#endif // _LIBCUDACXX_NO_AUTO_LINK + +#define _LIBCUDACXX_UNUSED_VAR(x) ((void)(x)) + +// Configures the fopen close-on-exec mode character, if any. This string will +// be appended to any mode string used by fstream for fopen/fdopen. +// +// Not all platforms support this, but it helps avoid fd-leaks on platforms that +// do. 
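+//
+// For illustration (hypothetical call site): fstream composes the mode by
+// string-literal concatenation, roughly
+//
+//     fopen(__name, "rb" _LIBCUDACXX_FOPEN_CLOEXEC_MODE);
+//
+// which yields "rbe" on Bionic and plain "rb" where the macro below expands
+// to nothing.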
+#if defined(__BIONIC__) +# define _LIBCUDACXX_FOPEN_CLOEXEC_MODE "e" +#else +# define _LIBCUDACXX_FOPEN_CLOEXEC_MODE +#endif + +#ifndef _LIBCUDACXX_SYS_CLOCK_DURATION +# define _LIBCUDACXX_SYS_CLOCK_DURATION microseconds +#endif + +#endif // __cplusplus + +#endif // _LIBCUDACXX_CONFIG diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__config_site.in b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__config_site.in new file mode 100644 index 000000000000..ab65c9554b0b --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__config_site.in @@ -0,0 +1,36 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX_CONFIG_SITE +#define _LIBCUDACXX_CONFIG_SITE + +#cmakedefine _LIBCUDACXX_ABI_VERSION @_LIBCUDACXX_ABI_VERSION@ +#cmakedefine _LIBCUDACXX_ABI_UNSTABLE +#cmakedefine _LIBCUDACXX_ABI_FORCE_ITANIUM +#cmakedefine _LIBCUDACXX_ABI_FORCE_MICROSOFT +#cmakedefine _LIBCUDACXX_HIDE_FROM_ABI_PER_TU_BY_DEFAULT +#cmakedefine _LIBCUDACXX_HAS_NO_GLOBAL_FILESYSTEM_NAMESPACE +#cmakedefine _LIBCUDACXX_HAS_NO_STDIN +#cmakedefine _LIBCUDACXX_HAS_NO_STDOUT +#cmakedefine _LIBCUDACXX_HAS_NO_THREADS +#cmakedefine _LIBCUDACXX_HAS_NO_MONOTONIC_CLOCK +#cmakedefine _LIBCUDACXX_HAS_NO_THREAD_UNSAFE_C_FUNCTIONS +#cmakedefine _LIBCUDACXX_HAS_MUSL_LIBC +#cmakedefine _LIBCUDACXX_HAS_THREAD_API_PTHREAD +#cmakedefine _LIBCUDACXX_HAS_THREAD_API_EXTERNAL +#cmakedefine _LIBCUDACXX_HAS_THREAD_API_WIN32 +#cmakedefine _LIBCUDACXX_HAS_THREAD_LIBRARY_EXTERNAL +#cmakedefine _LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS +#cmakedefine _LIBCUDACXX_NO_VCRUNTIME +#cmakedefine01 _LIBCUDACXX_HAS_MERGED_TYPEINFO_NAMES_DEFAULT +#cmakedefine _LIBCUDACXX_ABI_NAMESPACE @_LIBCUDACXX_ABI_NAMESPACE@ +#cmakedefine _LIBCUDACXX_HAS_PARALLEL_ALGORITHMS + +@_LIBCUDACXX_ABI_DEFINES@ + +#endif // _LIBCUDACXX_CONFIG_SITE diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__debug b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__debug new file mode 100644 index 000000000000..687fce51cdd6 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__debug @@ -0,0 +1,284 @@ +// -*- C++ -*- +//===--------------------------- __debug ----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX_DEBUG_H +#define _LIBCUDACXX_DEBUG_H + +#ifndef __cuda_std__ +#include <__config> +#include +#if defined(_LIBCUDACXX_HAS_NO_NULLPTR) +# include +#endif +#if _LIBCUDACXX_DEBUG_LEVEL >= 1 || defined(_LIBCUDACXX_BUILDING_LIBRARY) +# include +# include +# include +#endif +#include <__pragma_push> +#endif //__cuda_std__ + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +#if _LIBCUDACXX_DEBUG_LEVEL >= 1 && !defined(_LIBCUDACXX_ASSERT) +# define _LIBCUDACXX_ASSERT(x, m) ((x) ? (void)0 : \ + _CUDA_VSTD::__libcpp_debug_function(_CUDA_VSTD::__libcpp_debug_info(__FILE__, __LINE__, #x, m))) +#endif + +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 +#ifndef _LIBCUDACXX_DEBUG_ASSERT +#define _LIBCUDACXX_DEBUG_ASSERT(x, m) _LIBCUDACXX_ASSERT(x, m) +#endif +#define _LIBCUDACXX_DEBUG_MODE(...) __VA_ARGS__ +#endif + +#ifndef _LIBCUDACXX_ASSERT +# define _LIBCUDACXX_ASSERT(x, m) ((void)0) +#endif +#ifndef _LIBCUDACXX_DEBUG_ASSERT +# define _LIBCUDACXX_DEBUG_ASSERT(x, m) ((void)0) +#endif +#ifndef _LIBCUDACXX_DEBUG_MODE +#define _LIBCUDACXX_DEBUG_MODE(...) ((void)0) +#endif + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +struct _LIBCUDACXX_TEMPLATE_VIS __libcpp_debug_info { + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR + __libcpp_debug_info() + : __file_(nullptr), __line_(-1), __pred_(nullptr), __msg_(nullptr) {} + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR + __libcpp_debug_info(const char* __f, int __l, const char* __p, const char* __m) + : __file_(__f), __line_(__l), __pred_(__p), __msg_(__m) {} + + _LIBCUDACXX_FUNC_VIS std::string what() const; + + const char* __file_; + int __line_; + const char* __pred_; + const char* __msg_; +}; + +/// __libcpp_debug_function_type - The type of the assertion failure handler. +typedef void(*__libcpp_debug_function_type)(__libcpp_debug_info const&); + +/// __libcpp_debug_function - The handler function called when a _LIBCUDACXX_ASSERT +/// fails. +extern _LIBCUDACXX_EXPORTED_FROM_ABI __libcpp_debug_function_type __libcpp_debug_function; + +/// __libcpp_abort_debug_function - A debug handler that aborts when called. +_LIBCUDACXX_NORETURN _LIBCUDACXX_FUNC_VIS +void __libcpp_abort_debug_function(__libcpp_debug_info const&); + +/// __libcpp_set_debug_function - Set the debug handler to the specified +/// function. 
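+///
+/// For illustration (`my_handler` is a hypothetical name; the sketch assumes
+/// <cstdio> and <cstdlib> are available), a program can install its own
+/// handler before relying on debug-mode assertions:
+///
+///     void my_handler(std::__libcpp_debug_info const& __info) {
+///         std::fprintf(stderr, "%s\n", __info.what().c_str());
+///         std::abort();
+///     }
+///     bool __installed = std::__libcpp_set_debug_function(&my_handler);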
+_LIBCUDACXX_FUNC_VIS
+bool __libcpp_set_debug_function(__libcpp_debug_function_type __func);
+
+#if _LIBCUDACXX_DEBUG_LEVEL >= 2 || defined(_LIBCUDACXX_BUILDING_LIBRARY)
+
+struct _LIBCUDACXX_TYPE_VIS __c_node;
+
+struct _LIBCUDACXX_TYPE_VIS __i_node
+{
+    void* __i_;
+    __i_node* __next_;
+    __c_node* __c_;
+
+#ifndef _LIBCUDACXX_CXX03_LANG
+    __i_node(const __i_node&) = delete;
+    __i_node& operator=(const __i_node&) = delete;
+#else
+private:
+    __i_node(const __i_node&);
+    __i_node& operator=(const __i_node&);
+public:
+#endif
+    _LIBCUDACXX_INLINE_VISIBILITY
+    __i_node(void* __i, __i_node* __next, __c_node* __c)
+        : __i_(__i), __next_(__next), __c_(__c) {}
+    ~__i_node();
+};
+
+struct _LIBCUDACXX_TYPE_VIS __c_node
+{
+    void* __c_;
+    __c_node* __next_;
+    __i_node** beg_;
+    __i_node** end_;
+    __i_node** cap_;
+
+#ifndef _LIBCUDACXX_CXX03_LANG
+    __c_node(const __c_node&) = delete;
+    __c_node& operator=(const __c_node&) = delete;
+#else
+private:
+    __c_node(const __c_node&);
+    __c_node& operator=(const __c_node&);
+public:
+#endif
+    _LIBCUDACXX_INLINE_VISIBILITY
+    __c_node(void* __c, __c_node* __next)
+        : __c_(__c), __next_(__next), beg_(nullptr), end_(nullptr), cap_(nullptr) {}
+    virtual ~__c_node();
+
+    virtual bool __dereferenceable(const void*) const = 0;
+    virtual bool __decrementable(const void*) const = 0;
+    virtual bool __addable(const void*, ptrdiff_t) const = 0;
+    virtual bool __subscriptable(const void*, ptrdiff_t) const = 0;
+
+    void __add(__i_node* __i);
+    _LIBCUDACXX_HIDDEN void __remove(__i_node* __i);
+};
+
+template <class _Cont>
+struct _C_node
+    : public __c_node
+{
+    _C_node(void* __c, __c_node* __n)
+        : __c_node(__c, __n) {}
+
+    virtual bool __dereferenceable(const void*) const;
+    virtual bool __decrementable(const void*) const;
+    virtual bool __addable(const void*, ptrdiff_t) const;
+    virtual bool __subscriptable(const void*, ptrdiff_t) const;
+};
+
+template <class _Cont>
+inline bool
+_C_node<_Cont>::__dereferenceable(const void* __i) const
+{
+    typedef typename _Cont::const_iterator iterator;
+    const iterator* __j = static_cast<const iterator*>(__i);
+    _Cont* _Cp = static_cast<_Cont*>(__c_);
+    return _Cp->__dereferenceable(__j);
+}
+
+template <class _Cont>
+inline bool
+_C_node<_Cont>::__decrementable(const void* __i) const
+{
+    typedef typename _Cont::const_iterator iterator;
+    const iterator* __j = static_cast<const iterator*>(__i);
+    _Cont* _Cp = static_cast<_Cont*>(__c_);
+    return _Cp->__decrementable(__j);
+}
+
+template <class _Cont>
+inline bool
+_C_node<_Cont>::__addable(const void* __i, ptrdiff_t __n) const
+{
+    typedef typename _Cont::const_iterator iterator;
+    const iterator* __j = static_cast<const iterator*>(__i);
+    _Cont* _Cp = static_cast<_Cont*>(__c_);
+    return _Cp->__addable(__j, __n);
+}
+
+template <class _Cont>
+inline bool
+_C_node<_Cont>::__subscriptable(const void* __i, ptrdiff_t __n) const
+{
+    typedef typename _Cont::const_iterator iterator;
+    const iterator* __j = static_cast<const iterator*>(__i);
+    _Cont* _Cp = static_cast<_Cont*>(__c_);
+    return _Cp->__subscriptable(__j, __n);
+}
+
+class _LIBCUDACXX_TYPE_VIS __libcpp_db
+{
+    __c_node** __cbeg_;
+    __c_node** __cend_;
+    size_t __csz_;
+    __i_node** __ibeg_;
+    __i_node** __iend_;
+    size_t __isz_;
+
+    __libcpp_db();
+public:
+#ifndef _LIBCUDACXX_CXX03_LANG
+    __libcpp_db(const __libcpp_db&) = delete;
+    __libcpp_db& operator=(const __libcpp_db&) = delete;
+#else
+private:
+    __libcpp_db(const __libcpp_db&);
+    __libcpp_db& operator=(const __libcpp_db&);
+public:
+#endif
+    ~__libcpp_db();
+
+    class __db_c_iterator;
+    class __db_c_const_iterator;
+    class __db_i_iterator;
+    class __db_i_const_iterator;
+
+    __db_c_const_iterator __c_end() const;
+    __db_i_const_iterator __i_end() const;
+
+    typedef __c_node*(_InsertConstruct)(void*, void*, __c_node*);
+
+    template <class _Cont>
+    _LIBCUDACXX_INLINE_VISIBILITY static __c_node* __create_C_node(void *__mem, void *__c, __c_node *__next) {
+        return ::new(__mem) _C_node<_Cont>(__c, __next);
+    }
+
+    template <class _Cont>
+    _LIBCUDACXX_INLINE_VISIBILITY
+    void __insert_c(_Cont* __c)
+    {
+        __insert_c(static_cast<void*>(__c), &__create_C_node<_Cont>);
+    }
+
+    void __insert_i(void* __i);
+    void __insert_c(void* __c, _InsertConstruct* __fn);
+    void __erase_c(void* __c);
+
+    void __insert_ic(void* __i, const void* __c);
+    void __iterator_copy(void* __i, const void* __i0);
+    void __erase_i(void* __i);
+
+    void* __find_c_from_i(void* __i) const;
+    void __invalidate_all(void* __c);
+    __c_node* __find_c_and_lock(void* __c) const;
+    __c_node* __find_c(void* __c) const;
+    void unlock() const;
+
+    void swap(void* __c1, void* __c2);
+
+    bool __dereferenceable(const void* __i) const;
+    bool __decrementable(const void* __i) const;
+    bool __addable(const void* __i, ptrdiff_t __n) const;
+    bool __subscriptable(const void* __i, ptrdiff_t __n) const;
+    bool __less_than_comparable(const void* __i, const void* __j) const;
+private:
+    _LIBCUDACXX_HIDDEN
+    __i_node* __insert_iterator(void* __i);
+    _LIBCUDACXX_HIDDEN
+    __i_node* __find_iterator(const void* __i) const;
+
+    friend _LIBCUDACXX_FUNC_VIS __libcpp_db* __get_db();
+};
+
+_LIBCUDACXX_FUNC_VIS __libcpp_db* __get_db();
+_LIBCUDACXX_FUNC_VIS const __libcpp_db* __get_const_db();
+
+#endif // _LIBCUDACXX_DEBUG_LEVEL >= 2 || defined(_LIBCUDACXX_BUILDING_LIBRARY)
+
+_LIBCUDACXX_END_NAMESPACE_STD
+
+#ifndef __cuda_std__
+    #include <__pragma_pop>
+#endif
+
+#endif // _LIBCUDACXX_DEBUG_H
+
diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__errc b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__errc
new file mode 100644
index 000000000000..03382fa86500
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__errc
@@ -0,0 +1,217 @@
+// -*- C++ -*-
+//===---------------------------- __errc ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___ERRC +#define _LIBCUDACXX___ERRC + +/* + system_error synopsis + +namespace std +{ + +enum class errc +{ + address_family_not_supported, // EAFNOSUPPORT + address_in_use, // EADDRINUSE + address_not_available, // EADDRNOTAVAIL + already_connected, // EISCONN + argument_list_too_long, // E2BIG + argument_out_of_domain, // EDOM + bad_address, // EFAULT + bad_file_descriptor, // EBADF + bad_message, // EBADMSG + broken_pipe, // EPIPE + connection_aborted, // ECONNABORTED + connection_already_in_progress, // EALREADY + connection_refused, // ECONNREFUSED + connection_reset, // ECONNRESET + cross_device_link, // EXDEV + destination_address_required, // EDESTADDRREQ + device_or_resource_busy, // EBUSY + directory_not_empty, // ENOTEMPTY + executable_format_error, // ENOEXEC + file_exists, // EEXIST + file_too_large, // EFBIG + filename_too_long, // ENAMETOOLONG + function_not_supported, // ENOSYS + host_unreachable, // EHOSTUNREACH + identifier_removed, // EIDRM + illegal_byte_sequence, // EILSEQ + inappropriate_io_control_operation, // ENOTTY + interrupted, // EINTR + invalid_argument, // EINVAL + invalid_seek, // ESPIPE + io_error, // EIO + is_a_directory, // EISDIR + message_size, // EMSGSIZE + network_down, // ENETDOWN + network_reset, // ENETRESET + network_unreachable, // ENETUNREACH + no_buffer_space, // ENOBUFS + no_child_process, // ECHILD + no_link, // ENOLINK + no_lock_available, // ENOLCK + no_message_available, // ENODATA + no_message, // ENOMSG + no_protocol_option, // ENOPROTOOPT + no_space_on_device, // ENOSPC + no_stream_resources, // ENOSR + no_such_device_or_address, // ENXIO + no_such_device, // ENODEV + no_such_file_or_directory, // ENOENT + no_such_process, // ESRCH + not_a_directory, // ENOTDIR + not_a_socket, // ENOTSOCK + not_a_stream, // ENOSTR + not_connected, // ENOTCONN + not_enough_memory, // ENOMEM + not_supported, // ENOTSUP + operation_canceled, // ECANCELED + operation_in_progress, // EINPROGRESS + operation_not_permitted, // EPERM + operation_not_supported, // EOPNOTSUPP + operation_would_block, // EWOULDBLOCK + owner_dead, // EOWNERDEAD + permission_denied, // EACCES + protocol_error, // EPROTO + protocol_not_supported, // EPROTONOSUPPORT + read_only_file_system, // EROFS + resource_deadlock_would_occur, // EDEADLK + resource_unavailable_try_again, // EAGAIN + result_out_of_range, // ERANGE + state_not_recoverable, // ENOTRECOVERABLE + stream_timeout, // ETIME + text_file_busy, // ETXTBSY + timed_out, // ETIMEDOUT + too_many_files_open_in_system, // ENFILE + too_many_files_open, // EMFILE + too_many_links, // EMLINK + too_many_symbolic_link_levels, // ELOOP + value_too_large, // EOVERFLOW + wrong_protocol_type // EPROTOTYPE +}; + +*/ + +#include <__config> +#include + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +// Some error codes are not present on all platforms, so we provide equivalents +// for them: + +//enum class errc +_LIBCUDACXX_DECLARE_STRONG_ENUM(errc) +{ + address_family_not_supported = EAFNOSUPPORT, + address_in_use = EADDRINUSE, + address_not_available = EADDRNOTAVAIL, + already_connected = EISCONN, + argument_list_too_long = E2BIG, + argument_out_of_domain = EDOM, + bad_address = EFAULT, + bad_file_descriptor = EBADF, + bad_message = EBADMSG, + broken_pipe = EPIPE, + connection_aborted = 
ECONNABORTED, + connection_already_in_progress = EALREADY, + connection_refused = ECONNREFUSED, + connection_reset = ECONNRESET, + cross_device_link = EXDEV, + destination_address_required = EDESTADDRREQ, + device_or_resource_busy = EBUSY, + directory_not_empty = ENOTEMPTY, + executable_format_error = ENOEXEC, + file_exists = EEXIST, + file_too_large = EFBIG, + filename_too_long = ENAMETOOLONG, + function_not_supported = ENOSYS, + host_unreachable = EHOSTUNREACH, + identifier_removed = EIDRM, + illegal_byte_sequence = EILSEQ, + inappropriate_io_control_operation = ENOTTY, + interrupted = EINTR, + invalid_argument = EINVAL, + invalid_seek = ESPIPE, + io_error = EIO, + is_a_directory = EISDIR, + message_size = EMSGSIZE, + network_down = ENETDOWN, + network_reset = ENETRESET, + network_unreachable = ENETUNREACH, + no_buffer_space = ENOBUFS, + no_child_process = ECHILD, + no_link = ENOLINK, + no_lock_available = ENOLCK, +#ifdef ENODATA + no_message_available = ENODATA, +#else + no_message_available = ENOMSG, +#endif + no_message = ENOMSG, + no_protocol_option = ENOPROTOOPT, + no_space_on_device = ENOSPC, +#ifdef ENOSR + no_stream_resources = ENOSR, +#else + no_stream_resources = ENOMEM, +#endif + no_such_device_or_address = ENXIO, + no_such_device = ENODEV, + no_such_file_or_directory = ENOENT, + no_such_process = ESRCH, + not_a_directory = ENOTDIR, + not_a_socket = ENOTSOCK, +#ifdef ENOSTR + not_a_stream = ENOSTR, +#else + not_a_stream = EINVAL, +#endif + not_connected = ENOTCONN, + not_enough_memory = ENOMEM, + not_supported = ENOTSUP, + operation_canceled = ECANCELED, + operation_in_progress = EINPROGRESS, + operation_not_permitted = EPERM, + operation_not_supported = EOPNOTSUPP, + operation_would_block = EWOULDBLOCK, + owner_dead = EOWNERDEAD, + permission_denied = EACCES, + protocol_error = EPROTO, + protocol_not_supported = EPROTONOSUPPORT, + read_only_file_system = EROFS, + resource_deadlock_would_occur = EDEADLK, + resource_unavailable_try_again = EAGAIN, + result_out_of_range = ERANGE, + state_not_recoverable = ENOTRECOVERABLE, +#ifdef ETIME + stream_timeout = ETIME, +#else + stream_timeout = ETIMEDOUT, +#endif + text_file_busy = ETXTBSY, + timed_out = ETIMEDOUT, + too_many_files_open_in_system = ENFILE, + too_many_files_open = EMFILE, + too_many_links = EMLINK, + too_many_symbolic_link_levels = ELOOP, + value_too_large = EOVERFLOW, + wrong_protocol_type = EPROTOTYPE +}; +_LIBCUDACXX_DECLARE_STRONG_ENUM_EPILOG(errc) + +_LIBCUDACXX_END_NAMESPACE_STD + +#endif // _LIBCUDACXX___ERRC diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_03 b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_03 new file mode 100644 index 000000000000..f9610b6ea384 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_03 @@ -0,0 +1,1595 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX_FUNCTIONAL_03 +#define _LIBCUDACXX_FUNCTIONAL_03 + +// manual variadic expansion for + +#include <__pragma_push> + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +namespace __function { + +template class __base; + +template +class __base<_Rp()> +{ + __base(const __base&); + __base& operator=(const __base&); +public: + __base() {} + virtual ~__base() {} + virtual __base* __clone() const = 0; + virtual void __clone(__base*) const = 0; + virtual void destroy() = 0; + virtual void destroy_deallocate() = 0; + virtual _Rp operator()() = 0; +#ifndef _LIBCUDACXX_NO_RTTI + virtual const void* target(const type_info&) const = 0; + virtual const std::type_info& target_type() const = 0; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +class __base<_Rp(_A0)> +{ + __base(const __base&); + __base& operator=(const __base&); +public: + __base() {} + virtual ~__base() {} + virtual __base* __clone() const = 0; + virtual void __clone(__base*) const = 0; + virtual void destroy() = 0; + virtual void destroy_deallocate() = 0; + virtual _Rp operator()(_A0) = 0; +#ifndef _LIBCUDACXX_NO_RTTI + virtual const void* target(const type_info&) const = 0; + virtual const std::type_info& target_type() const = 0; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +class __base<_Rp(_A0, _A1)> +{ + __base(const __base&); + __base& operator=(const __base&); +public: + __base() {} + virtual ~__base() {} + virtual __base* __clone() const = 0; + virtual void __clone(__base*) const = 0; + virtual void destroy() = 0; + virtual void destroy_deallocate() = 0; + virtual _Rp operator()(_A0, _A1) = 0; +#ifndef _LIBCUDACXX_NO_RTTI + virtual const void* target(const type_info&) const = 0; + virtual const std::type_info& target_type() const = 0; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +class __base<_Rp(_A0, _A1, _A2)> +{ + __base(const __base&); + __base& operator=(const __base&); +public: + __base() {} + virtual ~__base() {} + virtual __base* __clone() const = 0; + virtual void __clone(__base*) const = 0; + virtual void destroy() = 0; + virtual void destroy_deallocate() = 0; + virtual _Rp operator()(_A0, _A1, _A2) = 0; +#ifndef _LIBCUDACXX_NO_RTTI + virtual const void* target(const type_info&) const = 0; + virtual const std::type_info& target_type() const = 0; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template class __func; + +template +class __func<_Fp, _Alloc, _Rp()> + : public __base<_Rp()> +{ + __compressed_pair<_Fp, _Alloc> __f_; +public: + explicit __func(_Fp __f) : __f_(_CUDA_VSTD::move(__f)) {} + explicit __func(_Fp __f, _Alloc __a) : __f_(_CUDA_VSTD::move(__f), _CUDA_VSTD::move(__a)) {} + virtual __base<_Rp()>* __clone() const; + virtual void __clone(__base<_Rp()>*) const; + virtual void destroy(); + virtual void destroy_deallocate(); + virtual _Rp operator()(); +#ifndef _LIBCUDACXX_NO_RTTI + virtual const void* target(const type_info&) const; + virtual const std::type_info& target_type() const; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +__base<_Rp()>* +__func<_Fp, _Alloc, _Rp()>::__clone() const +{ + typedef allocator_traits<_Alloc> __alloc_traits; + typedef typename __rebind_alloc_helper<__alloc_traits, __func>::type _Ap; + _Ap __a(__f_.second()); + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__func, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) __func(__f_.first(), _Alloc(__a)); + 
return __hold.release(); +} + +template +void +__func<_Fp, _Alloc, _Rp()>::__clone(__base<_Rp()>* __p) const +{ + ::new (__p) __func(__f_.first(), __f_.second()); +} + +template +void +__func<_Fp, _Alloc, _Rp()>::destroy() +{ + __f_.~__compressed_pair<_Fp, _Alloc>(); +} + +template +void +__func<_Fp, _Alloc, _Rp()>::destroy_deallocate() +{ + typedef allocator_traits<_Alloc> __alloc_traits; + typedef typename __rebind_alloc_helper<__alloc_traits, __func>::type _Ap; + _Ap __a(__f_.second()); + __f_.~__compressed_pair<_Fp, _Alloc>(); + __a.deallocate(this, 1); +} + +template +_Rp +__func<_Fp, _Alloc, _Rp()>::operator()() +{ + typedef __invoke_void_return_wrapper<_Rp> _Invoker; + return _Invoker::__call(__f_.first()); +} + +#ifndef _LIBCUDACXX_NO_RTTI + +template +const void* +__func<_Fp, _Alloc, _Rp()>::target(const type_info& __ti) const +{ + if (__ti == typeid(_Fp)) + return &__f_.first(); + return (const void*)0; +} + +template +const std::type_info& +__func<_Fp, _Alloc, _Rp()>::target_type() const +{ + return typeid(_Fp); +} + +#endif // _LIBCUDACXX_NO_RTTI + +template +class __func<_Fp, _Alloc, _Rp(_A0)> + : public __base<_Rp(_A0)> +{ + __compressed_pair<_Fp, _Alloc> __f_; +public: + _LIBCUDACXX_INLINE_VISIBILITY explicit __func(_Fp __f) : __f_(_CUDA_VSTD::move(__f)) {} + _LIBCUDACXX_INLINE_VISIBILITY explicit __func(_Fp __f, _Alloc __a) + : __f_(_CUDA_VSTD::move(__f), _CUDA_VSTD::move(__a)) {} + virtual __base<_Rp(_A0)>* __clone() const; + virtual void __clone(__base<_Rp(_A0)>*) const; + virtual void destroy(); + virtual void destroy_deallocate(); + virtual _Rp operator()(_A0); +#ifndef _LIBCUDACXX_NO_RTTI + virtual const void* target(const type_info&) const; + virtual const std::type_info& target_type() const; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +__base<_Rp(_A0)>* +__func<_Fp, _Alloc, _Rp(_A0)>::__clone() const +{ + typedef allocator_traits<_Alloc> __alloc_traits; + typedef typename __rebind_alloc_helper<__alloc_traits, __func>::type _Ap; + _Ap __a(__f_.second()); + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__func, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) __func(__f_.first(), _Alloc(__a)); + return __hold.release(); +} + +template +void +__func<_Fp, _Alloc, _Rp(_A0)>::__clone(__base<_Rp(_A0)>* __p) const +{ + ::new (__p) __func(__f_.first(), __f_.second()); +} + +template +void +__func<_Fp, _Alloc, _Rp(_A0)>::destroy() +{ + __f_.~__compressed_pair<_Fp, _Alloc>(); +} + +template +void +__func<_Fp, _Alloc, _Rp(_A0)>::destroy_deallocate() +{ + typedef allocator_traits<_Alloc> __alloc_traits; + typedef typename __rebind_alloc_helper<__alloc_traits, __func>::type _Ap; + _Ap __a(__f_.second()); + __f_.~__compressed_pair<_Fp, _Alloc>(); + __a.deallocate(this, 1); +} + +template +_Rp +__func<_Fp, _Alloc, _Rp(_A0)>::operator()(_A0 __a0) +{ + typedef __invoke_void_return_wrapper<_Rp> _Invoker; + return _Invoker::__call(__f_.first(), __a0); +} + +#ifndef _LIBCUDACXX_NO_RTTI + +template +const void* +__func<_Fp, _Alloc, _Rp(_A0)>::target(const type_info& __ti) const +{ + if (__ti == typeid(_Fp)) + return &__f_.first(); + return (const void*)0; +} + +template +const std::type_info& +__func<_Fp, _Alloc, _Rp(_A0)>::target_type() const +{ + return typeid(_Fp); +} + +#endif // _LIBCUDACXX_NO_RTTI + +template +class __func<_Fp, _Alloc, _Rp(_A0, _A1)> + : public __base<_Rp(_A0, _A1)> +{ + __compressed_pair<_Fp, _Alloc> __f_; +public: + _LIBCUDACXX_INLINE_VISIBILITY explicit __func(_Fp __f) : __f_(_CUDA_VSTD::move(__f)) {} + _LIBCUDACXX_INLINE_VISIBILITY 
explicit __func(_Fp __f, _Alloc __a) + : __f_(_CUDA_VSTD::move(__f), _CUDA_VSTD::move(__a)) {} + virtual __base<_Rp(_A0, _A1)>* __clone() const; + virtual void __clone(__base<_Rp(_A0, _A1)>*) const; + virtual void destroy(); + virtual void destroy_deallocate(); + virtual _Rp operator()(_A0, _A1); +#ifndef _LIBCUDACXX_NO_RTTI + virtual const void* target(const type_info&) const; + virtual const std::type_info& target_type() const; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +__base<_Rp(_A0, _A1)>* +__func<_Fp, _Alloc, _Rp(_A0, _A1)>::__clone() const +{ + typedef allocator_traits<_Alloc> __alloc_traits; + typedef typename __rebind_alloc_helper<__alloc_traits, __func>::type _Ap; + _Ap __a(__f_.second()); + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__func, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) __func(__f_.first(), _Alloc(__a)); + return __hold.release(); +} + +template +void +__func<_Fp, _Alloc, _Rp(_A0, _A1)>::__clone(__base<_Rp(_A0, _A1)>* __p) const +{ + ::new (__p) __func(__f_.first(), __f_.second()); +} + +template +void +__func<_Fp, _Alloc, _Rp(_A0, _A1)>::destroy() +{ + __f_.~__compressed_pair<_Fp, _Alloc>(); +} + +template +void +__func<_Fp, _Alloc, _Rp(_A0, _A1)>::destroy_deallocate() +{ + typedef allocator_traits<_Alloc> __alloc_traits; + typedef typename __rebind_alloc_helper<__alloc_traits, __func>::type _Ap; + _Ap __a(__f_.second()); + __f_.~__compressed_pair<_Fp, _Alloc>(); + __a.deallocate(this, 1); +} + +template +_Rp +__func<_Fp, _Alloc, _Rp(_A0, _A1)>::operator()(_A0 __a0, _A1 __a1) +{ + typedef __invoke_void_return_wrapper<_Rp> _Invoker; + return _Invoker::__call(__f_.first(), __a0, __a1); +} + +#ifndef _LIBCUDACXX_NO_RTTI + +template +const void* +__func<_Fp, _Alloc, _Rp(_A0, _A1)>::target(const type_info& __ti) const +{ + if (__ti == typeid(_Fp)) + return &__f_.first(); + return (const void*)0; +} + +template +const std::type_info& +__func<_Fp, _Alloc, _Rp(_A0, _A1)>::target_type() const +{ + return typeid(_Fp); +} + +#endif // _LIBCUDACXX_NO_RTTI + +template +class __func<_Fp, _Alloc, _Rp(_A0, _A1, _A2)> + : public __base<_Rp(_A0, _A1, _A2)> +{ + __compressed_pair<_Fp, _Alloc> __f_; +public: + _LIBCUDACXX_INLINE_VISIBILITY explicit __func(_Fp __f) : __f_(_CUDA_VSTD::move(__f)) {} + _LIBCUDACXX_INLINE_VISIBILITY explicit __func(_Fp __f, _Alloc __a) + : __f_(_CUDA_VSTD::move(__f), _CUDA_VSTD::move(__a)) {} + virtual __base<_Rp(_A0, _A1, _A2)>* __clone() const; + virtual void __clone(__base<_Rp(_A0, _A1, _A2)>*) const; + virtual void destroy(); + virtual void destroy_deallocate(); + virtual _Rp operator()(_A0, _A1, _A2); +#ifndef _LIBCUDACXX_NO_RTTI + virtual const void* target(const type_info&) const; + virtual const std::type_info& target_type() const; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +__base<_Rp(_A0, _A1, _A2)>* +__func<_Fp, _Alloc, _Rp(_A0, _A1, _A2)>::__clone() const +{ + typedef allocator_traits<_Alloc> __alloc_traits; + typedef typename __rebind_alloc_helper<__alloc_traits, __func>::type _Ap; + _Ap __a(__f_.second()); + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__func, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) __func(__f_.first(), _Alloc(__a)); + return __hold.release(); +} + +template +void +__func<_Fp, _Alloc, _Rp(_A0, _A1, _A2)>::__clone(__base<_Rp(_A0, _A1, _A2)>* __p) const +{ + ::new (__p) __func(__f_.first(), __f_.second()); +} + +template +void +__func<_Fp, _Alloc, _Rp(_A0, _A1, _A2)>::destroy() +{ + __f_.~__compressed_pair<_Fp, _Alloc>(); +} + +template +void 
+__func<_Fp, _Alloc, _Rp(_A0, _A1, _A2)>::destroy_deallocate() +{ + typedef allocator_traits<_Alloc> __alloc_traits; + typedef typename __rebind_alloc_helper<__alloc_traits, __func>::type _Ap; + _Ap __a(__f_.second()); + __f_.~__compressed_pair<_Fp, _Alloc>(); + __a.deallocate(this, 1); +} + +template +_Rp +__func<_Fp, _Alloc, _Rp(_A0, _A1, _A2)>::operator()(_A0 __a0, _A1 __a1, _A2 __a2) +{ + typedef __invoke_void_return_wrapper<_Rp> _Invoker; + return _Invoker::__call(__f_.first(), __a0, __a1, __a2); +} + +#ifndef _LIBCUDACXX_NO_RTTI + +template +const void* +__func<_Fp, _Alloc, _Rp(_A0, _A1, _A2)>::target(const type_info& __ti) const +{ + if (__ti == typeid(_Fp)) + return &__f_.first(); + return (const void*)0; +} + +template +const std::type_info& +__func<_Fp, _Alloc, _Rp(_A0, _A1, _A2)>::target_type() const +{ + return typeid(_Fp); +} + +#endif // _LIBCUDACXX_NO_RTTI + +} // __function + +template +class _LIBCUDACXX_TEMPLATE_VIS function<_Rp()> +{ + typedef __function::__base<_Rp()> __base; + aligned_storage<3*sizeof(void*)>::type __buf_; + __base* __f_; + +public: + typedef _Rp result_type; + + // 20.7.16.2.1, construct/copy/destroy: + _LIBCUDACXX_INLINE_VISIBILITY explicit function() : __f_(0) {} + _LIBCUDACXX_INLINE_VISIBILITY function(nullptr_t) : __f_(0) {} + function(const function&); + template + function(_Fp, + typename enable_if::value>::type* = 0); + + template + _LIBCUDACXX_INLINE_VISIBILITY + function(allocator_arg_t, const _Alloc&) : __f_(0) {} + template + _LIBCUDACXX_INLINE_VISIBILITY + function(allocator_arg_t, const _Alloc&, nullptr_t) : __f_(0) {} + template + function(allocator_arg_t, const _Alloc&, const function&); + template + function(allocator_arg_t, const _Alloc& __a, _Fp __f, + typename enable_if::value>::type* = 0); + + function& operator=(const function&); + function& operator=(nullptr_t); + template + typename enable_if + < + !is_integral<_Fp>::value, + function& + >::type + operator=(_Fp); + + ~function(); + + // 20.7.16.2.2, function modifiers: + void swap(function&); + template + _LIBCUDACXX_INLINE_VISIBILITY + void assign(_Fp __f, const _Alloc& __a) + {function(allocator_arg, __a, __f).swap(*this);} + + // 20.7.16.2.3, function capacity: + _LIBCUDACXX_INLINE_VISIBILITY operator bool() const {return __f_;} + +private: + // deleted overloads close possible hole in the type system + template + bool operator==(const function<_R2()>&) const;// = delete; + template + bool operator!=(const function<_R2()>&) const;// = delete; +public: + // 20.7.16.2.4, function invocation: + _Rp operator()() const; + +#ifndef _LIBCUDACXX_NO_RTTI + // 20.7.16.2.5, function target access: + const std::type_info& target_type() const; + template _Tp* target(); + template const _Tp* target() const; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +function<_Rp()>::function(const function& __f) +{ + if (__f.__f_ == 0) + __f_ = 0; + else if (__f.__f_ == (const __base*)&__f.__buf_) + { + __f_ = (__base*)&__buf_; + __f.__f_->__clone(__f_); + } + else + __f_ = __f.__f_->__clone(); +} + +template +template +function<_Rp()>::function(allocator_arg_t, const _Alloc&, const function& __f) +{ + if (__f.__f_ == 0) + __f_ = 0; + else if (__f.__f_ == (const __base*)&__f.__buf_) + { + __f_ = (__base*)&__buf_; + __f.__f_->__clone(__f_); + } + else + __f_ = __f.__f_->__clone(); +} + +template +template +function<_Rp()>::function(_Fp __f, + typename enable_if::value>::type*) + : __f_(0) +{ + if (__function::__not_null(__f)) + { + typedef __function::__func<_Fp, allocator<_Fp>, _Rp()> _FF; + if 
(sizeof(_FF) <= sizeof(__buf_)) + { + __f_ = (__base*)&__buf_; + ::new (__f_) _FF(__f); + } + else + { + typedef allocator<_FF> _Ap; + _Ap __a; + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__base, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) _FF(__f, allocator<_Fp>(__a)); + __f_ = __hold.release(); + } + } +} + +template +template +function<_Rp()>::function(allocator_arg_t, const _Alloc& __a0, _Fp __f, + typename enable_if::value>::type*) + : __f_(0) +{ + typedef allocator_traits<_Alloc> __alloc_traits; + if (__function::__not_null(__f)) + { + typedef __function::__func<_Fp, _Alloc, _Rp()> _FF; + if (sizeof(_FF) <= sizeof(__buf_)) + { + __f_ = (__base*)&__buf_; + ::new (__f_) _FF(__f, __a0); + } + else + { + typedef typename __rebind_alloc_helper<__alloc_traits, _FF>::type _Ap; + _Ap __a(__a0); + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__base, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) _FF(__f, _Alloc(__a)); + __f_ = __hold.release(); + } + } +} + +template +function<_Rp()>& +function<_Rp()>::operator=(const function& __f) +{ + if (__f) + function(__f).swap(*this); + else + *this = nullptr; + return *this; +} + +template +function<_Rp()>& +function<_Rp()>::operator=(nullptr_t) +{ + __base* __t = __f_; + __f_ = 0; + if (__t == (__base*)&__buf_) + __t->destroy(); + else if (__t) + __t->destroy_deallocate(); + return *this; +} + +template +template +typename enable_if +< + !is_integral<_Fp>::value, + function<_Rp()>& +>::type +function<_Rp()>::operator=(_Fp __f) +{ + function(_CUDA_VSTD::move(__f)).swap(*this); + return *this; +} + +template +function<_Rp()>::~function() +{ + if (__f_ == (__base*)&__buf_) + __f_->destroy(); + else if (__f_) + __f_->destroy_deallocate(); +} + +template +void +function<_Rp()>::swap(function& __f) +{ + if (_CUDA_VSTD::addressof(__f) == this) + return; + if (__f_ == (__base*)&__buf_ && __f.__f_ == (__base*)&__f.__buf_) + { + typename aligned_storage::type __tempbuf; + __base* __t = (__base*)&__tempbuf; + __f_->__clone(__t); + __f_->destroy(); + __f_ = 0; + __f.__f_->__clone((__base*)&__buf_); + __f.__f_->destroy(); + __f.__f_ = 0; + __f_ = (__base*)&__buf_; + __t->__clone((__base*)&__f.__buf_); + __t->destroy(); + __f.__f_ = (__base*)&__f.__buf_; + } + else if (__f_ == (__base*)&__buf_) + { + __f_->__clone((__base*)&__f.__buf_); + __f_->destroy(); + __f_ = __f.__f_; + __f.__f_ = (__base*)&__f.__buf_; + } + else if (__f.__f_ == (__base*)&__f.__buf_) + { + __f.__f_->__clone((__base*)&__buf_); + __f.__f_->destroy(); + __f.__f_ = __f_; + __f_ = (__base*)&__buf_; + } + else + _CUDA_VSTD::swap(__f_, __f.__f_); +} + +template +_Rp +function<_Rp()>::operator()() const +{ + if (__f_ == 0) + __throw_bad_function_call(); + return (*__f_)(); +} + +#ifndef _LIBCUDACXX_NO_RTTI + +template +const std::type_info& +function<_Rp()>::target_type() const +{ + if (__f_ == 0) + return typeid(void); + return __f_->target_type(); +} + +template +template +_Tp* +function<_Rp()>::target() +{ + if (__f_ == 0) + return (_Tp*)0; + return (_Tp*) const_cast(__f_->target(typeid(_Tp))); +} + +template +template +const _Tp* +function<_Rp()>::target() const +{ + if (__f_ == 0) + return (const _Tp*)0; + return (const _Tp*)__f_->target(typeid(_Tp)); +} + +#endif // _LIBCUDACXX_NO_RTTI + +template +class _LIBCUDACXX_TEMPLATE_VIS function<_Rp(_A0)> + : public unary_function<_A0, _Rp> +{ + typedef __function::__base<_Rp(_A0)> __base; + aligned_storage<3*sizeof(void*)>::type __buf_; + __base* __f_; + +public: + typedef _Rp result_type; 
+ + // 20.7.16.2.1, construct/copy/destroy: + _LIBCUDACXX_INLINE_VISIBILITY explicit function() : __f_(0) {} + _LIBCUDACXX_INLINE_VISIBILITY function(nullptr_t) : __f_(0) {} + function(const function&); + template + function(_Fp, + typename enable_if::value>::type* = 0); + + template + _LIBCUDACXX_INLINE_VISIBILITY + function(allocator_arg_t, const _Alloc&) : __f_(0) {} + template + _LIBCUDACXX_INLINE_VISIBILITY + function(allocator_arg_t, const _Alloc&, nullptr_t) : __f_(0) {} + template + function(allocator_arg_t, const _Alloc&, const function&); + template + function(allocator_arg_t, const _Alloc& __a, _Fp __f, + typename enable_if::value>::type* = 0); + + function& operator=(const function&); + function& operator=(nullptr_t); + template + typename enable_if + < + !is_integral<_Fp>::value, + function& + >::type + operator=(_Fp); + + ~function(); + + // 20.7.16.2.2, function modifiers: + void swap(function&); + template + _LIBCUDACXX_INLINE_VISIBILITY + void assign(_Fp __f, const _Alloc& __a) + {function(allocator_arg, __a, __f).swap(*this);} + + // 20.7.16.2.3, function capacity: + _LIBCUDACXX_INLINE_VISIBILITY operator bool() const {return __f_;} + +private: + // deleted overloads close possible hole in the type system + template + bool operator==(const function<_R2(_B0)>&) const;// = delete; + template + bool operator!=(const function<_R2(_B0)>&) const;// = delete; +public: + // 20.7.16.2.4, function invocation: + _Rp operator()(_A0) const; + +#ifndef _LIBCUDACXX_NO_RTTI + // 20.7.16.2.5, function target access: + const std::type_info& target_type() const; + template _Tp* target(); + template const _Tp* target() const; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +function<_Rp(_A0)>::function(const function& __f) +{ + if (__f.__f_ == 0) + __f_ = 0; + else if (__f.__f_ == (const __base*)&__f.__buf_) + { + __f_ = (__base*)&__buf_; + __f.__f_->__clone(__f_); + } + else + __f_ = __f.__f_->__clone(); +} + +template +template +function<_Rp(_A0)>::function(allocator_arg_t, const _Alloc&, const function& __f) +{ + if (__f.__f_ == 0) + __f_ = 0; + else if (__f.__f_ == (const __base*)&__f.__buf_) + { + __f_ = (__base*)&__buf_; + __f.__f_->__clone(__f_); + } + else + __f_ = __f.__f_->__clone(); +} + +template +template +function<_Rp(_A0)>::function(_Fp __f, + typename enable_if::value>::type*) + : __f_(0) +{ + if (__function::__not_null(__f)) + { + typedef __function::__func<_Fp, allocator<_Fp>, _Rp(_A0)> _FF; + if (sizeof(_FF) <= sizeof(__buf_)) + { + __f_ = (__base*)&__buf_; + ::new (__f_) _FF(__f); + } + else + { + typedef allocator<_FF> _Ap; + _Ap __a; + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__base, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) _FF(__f, allocator<_Fp>(__a)); + __f_ = __hold.release(); + } + } +} + +template +template +function<_Rp(_A0)>::function(allocator_arg_t, const _Alloc& __a0, _Fp __f, + typename enable_if::value>::type*) + : __f_(0) +{ + typedef allocator_traits<_Alloc> __alloc_traits; + if (__function::__not_null(__f)) + { + typedef __function::__func<_Fp, _Alloc, _Rp(_A0)> _FF; + if (sizeof(_FF) <= sizeof(__buf_)) + { + __f_ = (__base*)&__buf_; + ::new (__f_) _FF(__f, __a0); + } + else + { + typedef typename __rebind_alloc_helper<__alloc_traits, _FF>::type _Ap; + _Ap __a(__a0); + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__base, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) _FF(__f, _Alloc(__a)); + __f_ = __hold.release(); + } + } +} + +template +function<_Rp(_A0)>& 
+function<_Rp(_A0)>::operator=(const function& __f) +{ + if (__f) + function(__f).swap(*this); + else + *this = nullptr; + return *this; +} + +template +function<_Rp(_A0)>& +function<_Rp(_A0)>::operator=(nullptr_t) +{ + __base* __t = __f_; + __f_ = 0; + if (__t == (__base*)&__buf_) + __t->destroy(); + else if (__t) + __t->destroy_deallocate(); + return *this; +} + +template +template +typename enable_if +< + !is_integral<_Fp>::value, + function<_Rp(_A0)>& +>::type +function<_Rp(_A0)>::operator=(_Fp __f) +{ + function(_CUDA_VSTD::move(__f)).swap(*this); + return *this; +} + +template +function<_Rp(_A0)>::~function() +{ + if (__f_ == (__base*)&__buf_) + __f_->destroy(); + else if (__f_) + __f_->destroy_deallocate(); +} + +template +void +function<_Rp(_A0)>::swap(function& __f) +{ + if (_CUDA_VSTD::addressof(__f) == this) + return; + if (__f_ == (__base*)&__buf_ && __f.__f_ == (__base*)&__f.__buf_) + { + typename aligned_storage::type __tempbuf; + __base* __t = (__base*)&__tempbuf; + __f_->__clone(__t); + __f_->destroy(); + __f_ = 0; + __f.__f_->__clone((__base*)&__buf_); + __f.__f_->destroy(); + __f.__f_ = 0; + __f_ = (__base*)&__buf_; + __t->__clone((__base*)&__f.__buf_); + __t->destroy(); + __f.__f_ = (__base*)&__f.__buf_; + } + else if (__f_ == (__base*)&__buf_) + { + __f_->__clone((__base*)&__f.__buf_); + __f_->destroy(); + __f_ = __f.__f_; + __f.__f_ = (__base*)&__f.__buf_; + } + else if (__f.__f_ == (__base*)&__f.__buf_) + { + __f.__f_->__clone((__base*)&__buf_); + __f.__f_->destroy(); + __f.__f_ = __f_; + __f_ = (__base*)&__buf_; + } + else + _CUDA_VSTD::swap(__f_, __f.__f_); +} + +template +_Rp +function<_Rp(_A0)>::operator()(_A0 __a0) const +{ + if (__f_ == 0) + __throw_bad_function_call(); + return (*__f_)(__a0); +} + +#ifndef _LIBCUDACXX_NO_RTTI + +template +const std::type_info& +function<_Rp(_A0)>::target_type() const +{ + if (__f_ == 0) + return typeid(void); + return __f_->target_type(); +} + +template +template +_Tp* +function<_Rp(_A0)>::target() +{ + if (__f_ == 0) + return (_Tp*)0; + return (_Tp*) const_cast(__f_->target(typeid(_Tp))); +} + +template +template +const _Tp* +function<_Rp(_A0)>::target() const +{ + if (__f_ == 0) + return (const _Tp*)0; + return (const _Tp*)__f_->target(typeid(_Tp)); +} + +#endif // _LIBCUDACXX_NO_RTTI + +template +class _LIBCUDACXX_TEMPLATE_VIS function<_Rp(_A0, _A1)> + : public binary_function<_A0, _A1, _Rp> +{ + typedef __function::__base<_Rp(_A0, _A1)> __base; + aligned_storage<3*sizeof(void*)>::type __buf_; + __base* __f_; + +public: + typedef _Rp result_type; + + // 20.7.16.2.1, construct/copy/destroy: + _LIBCUDACXX_INLINE_VISIBILITY explicit function() : __f_(0) {} + _LIBCUDACXX_INLINE_VISIBILITY function(nullptr_t) : __f_(0) {} + function(const function&); + template + function(_Fp, + typename enable_if::value>::type* = 0); + + template + _LIBCUDACXX_INLINE_VISIBILITY + function(allocator_arg_t, const _Alloc&) : __f_(0) {} + template + _LIBCUDACXX_INLINE_VISIBILITY + function(allocator_arg_t, const _Alloc&, nullptr_t) : __f_(0) {} + template + function(allocator_arg_t, const _Alloc&, const function&); + template + function(allocator_arg_t, const _Alloc& __a, _Fp __f, + typename enable_if::value>::type* = 0); + + function& operator=(const function&); + function& operator=(nullptr_t); + template + typename enable_if + < + !is_integral<_Fp>::value, + function& + >::type + operator=(_Fp); + + ~function(); + + // 20.7.16.2.2, function modifiers: + void swap(function&); + template + _LIBCUDACXX_INLINE_VISIBILITY + void assign(_Fp __f, const 
_Alloc& __a) + {function(allocator_arg, __a, __f).swap(*this);} + + // 20.7.16.2.3, function capacity: + operator bool() const {return __f_;} + +private: + // deleted overloads close possible hole in the type system + template + bool operator==(const function<_R2(_B0, _B1)>&) const;// = delete; + template + bool operator!=(const function<_R2(_B0, _B1)>&) const;// = delete; +public: + // 20.7.16.2.4, function invocation: + _Rp operator()(_A0, _A1) const; + +#ifndef _LIBCUDACXX_NO_RTTI + // 20.7.16.2.5, function target access: + const std::type_info& target_type() const; + template _Tp* target(); + template const _Tp* target() const; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +function<_Rp(_A0, _A1)>::function(const function& __f) +{ + if (__f.__f_ == 0) + __f_ = 0; + else if (__f.__f_ == (const __base*)&__f.__buf_) + { + __f_ = (__base*)&__buf_; + __f.__f_->__clone(__f_); + } + else + __f_ = __f.__f_->__clone(); +} + +template +template +function<_Rp(_A0, _A1)>::function(allocator_arg_t, const _Alloc&, const function& __f) +{ + if (__f.__f_ == 0) + __f_ = 0; + else if (__f.__f_ == (const __base*)&__f.__buf_) + { + __f_ = (__base*)&__buf_; + __f.__f_->__clone(__f_); + } + else + __f_ = __f.__f_->__clone(); +} + +template +template +function<_Rp(_A0, _A1)>::function(_Fp __f, + typename enable_if::value>::type*) + : __f_(0) +{ + if (__function::__not_null(__f)) + { + typedef __function::__func<_Fp, allocator<_Fp>, _Rp(_A0, _A1)> _FF; + if (sizeof(_FF) <= sizeof(__buf_)) + { + __f_ = (__base*)&__buf_; + ::new (__f_) _FF(__f); + } + else + { + typedef allocator<_FF> _Ap; + _Ap __a; + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__base, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) _FF(__f, allocator<_Fp>(__a)); + __f_ = __hold.release(); + } + } +} + +template +template +function<_Rp(_A0, _A1)>::function(allocator_arg_t, const _Alloc& __a0, _Fp __f, + typename enable_if::value>::type*) + : __f_(0) +{ + typedef allocator_traits<_Alloc> __alloc_traits; + if (__function::__not_null(__f)) + { + typedef __function::__func<_Fp, _Alloc, _Rp(_A0, _A1)> _FF; + if (sizeof(_FF) <= sizeof(__buf_)) + { + __f_ = (__base*)&__buf_; + ::new (__f_) _FF(__f, __a0); + } + else + { + typedef typename __rebind_alloc_helper<__alloc_traits, _FF>::type _Ap; + _Ap __a(__a0); + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__base, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) _FF(__f, _Alloc(__a)); + __f_ = __hold.release(); + } + } +} + +template +function<_Rp(_A0, _A1)>& +function<_Rp(_A0, _A1)>::operator=(const function& __f) +{ + if (__f) + function(__f).swap(*this); + else + *this = nullptr; + return *this; +} + +template +function<_Rp(_A0, _A1)>& +function<_Rp(_A0, _A1)>::operator=(nullptr_t) +{ + __base* __t = __f_; + __f_ = 0; + if (__t == (__base*)&__buf_) + __t->destroy(); + else if (__t) + __t->destroy_deallocate(); + return *this; +} + +template +template +typename enable_if +< + !is_integral<_Fp>::value, + function<_Rp(_A0, _A1)>& +>::type +function<_Rp(_A0, _A1)>::operator=(_Fp __f) +{ + function(_CUDA_VSTD::move(__f)).swap(*this); + return *this; +} + +template +function<_Rp(_A0, _A1)>::~function() +{ + if (__f_ == (__base*)&__buf_) + __f_->destroy(); + else if (__f_) + __f_->destroy_deallocate(); +} + +template +void +function<_Rp(_A0, _A1)>::swap(function& __f) +{ + if (_CUDA_VSTD::addressof(__f) == this) + return; + if (__f_ == (__base*)&__buf_ && __f.__f_ == (__base*)&__f.__buf_) + { + typename aligned_storage::type __tempbuf; + __base* __t = 
(__base*)&__tempbuf; + __f_->__clone(__t); + __f_->destroy(); + __f_ = 0; + __f.__f_->__clone((__base*)&__buf_); + __f.__f_->destroy(); + __f.__f_ = 0; + __f_ = (__base*)&__buf_; + __t->__clone((__base*)&__f.__buf_); + __t->destroy(); + __f.__f_ = (__base*)&__f.__buf_; + } + else if (__f_ == (__base*)&__buf_) + { + __f_->__clone((__base*)&__f.__buf_); + __f_->destroy(); + __f_ = __f.__f_; + __f.__f_ = (__base*)&__f.__buf_; + } + else if (__f.__f_ == (__base*)&__f.__buf_) + { + __f.__f_->__clone((__base*)&__buf_); + __f.__f_->destroy(); + __f.__f_ = __f_; + __f_ = (__base*)&__buf_; + } + else + _CUDA_VSTD::swap(__f_, __f.__f_); +} + +template +_Rp +function<_Rp(_A0, _A1)>::operator()(_A0 __a0, _A1 __a1) const +{ + if (__f_ == 0) + __throw_bad_function_call(); + return (*__f_)(__a0, __a1); +} + +#ifndef _LIBCUDACXX_NO_RTTI + +template +const std::type_info& +function<_Rp(_A0, _A1)>::target_type() const +{ + if (__f_ == 0) + return typeid(void); + return __f_->target_type(); +} + +template +template +_Tp* +function<_Rp(_A0, _A1)>::target() +{ + if (__f_ == 0) + return (_Tp*)0; + return (_Tp*) const_cast(__f_->target(typeid(_Tp))); +} + +template +template +const _Tp* +function<_Rp(_A0, _A1)>::target() const +{ + if (__f_ == 0) + return (const _Tp*)0; + return (const _Tp*)__f_->target(typeid(_Tp)); +} + +#endif // _LIBCUDACXX_NO_RTTI + +template +class _LIBCUDACXX_TEMPLATE_VIS function<_Rp(_A0, _A1, _A2)> +{ + typedef __function::__base<_Rp(_A0, _A1, _A2)> __base; + aligned_storage<3*sizeof(void*)>::type __buf_; + __base* __f_; + +public: + typedef _Rp result_type; + + // 20.7.16.2.1, construct/copy/destroy: + _LIBCUDACXX_INLINE_VISIBILITY explicit function() : __f_(0) {} + _LIBCUDACXX_INLINE_VISIBILITY function(nullptr_t) : __f_(0) {} + function(const function&); + template + function(_Fp, + typename enable_if::value>::type* = 0); + + template + _LIBCUDACXX_INLINE_VISIBILITY + function(allocator_arg_t, const _Alloc&) : __f_(0) {} + template + _LIBCUDACXX_INLINE_VISIBILITY + function(allocator_arg_t, const _Alloc&, nullptr_t) : __f_(0) {} + template + function(allocator_arg_t, const _Alloc&, const function&); + template + function(allocator_arg_t, const _Alloc& __a, _Fp __f, + typename enable_if::value>::type* = 0); + + function& operator=(const function&); + function& operator=(nullptr_t); + template + typename enable_if + < + !is_integral<_Fp>::value, + function& + >::type + operator=(_Fp); + + ~function(); + + // 20.7.16.2.2, function modifiers: + void swap(function&); + template + _LIBCUDACXX_INLINE_VISIBILITY + void assign(_Fp __f, const _Alloc& __a) + {function(allocator_arg, __a, __f).swap(*this);} + + // 20.7.16.2.3, function capacity: + _LIBCUDACXX_INLINE_VISIBILITY operator bool() const {return __f_;} + +private: + // deleted overloads close possible hole in the type system + template + bool operator==(const function<_R2(_B0, _B1, _B2)>&) const;// = delete; + template + bool operator!=(const function<_R2(_B0, _B1, _B2)>&) const;// = delete; +public: + // 20.7.16.2.4, function invocation: + _Rp operator()(_A0, _A1, _A2) const; + +#ifndef _LIBCUDACXX_NO_RTTI + // 20.7.16.2.5, function target access: + const std::type_info& target_type() const; + template _Tp* target(); + template const _Tp* target() const; +#endif // _LIBCUDACXX_NO_RTTI +}; + +template +function<_Rp(_A0, _A1, _A2)>::function(const function& __f) +{ + if (__f.__f_ == 0) + __f_ = 0; + else if (__f.__f_ == (const __base*)&__f.__buf_) + { + __f_ = (__base*)&__buf_; + __f.__f_->__clone(__f_); + } + else + __f_ = 
__f.__f_->__clone(); +} + +template +template +function<_Rp(_A0, _A1, _A2)>::function(allocator_arg_t, const _Alloc&, + const function& __f) +{ + if (__f.__f_ == 0) + __f_ = 0; + else if (__f.__f_ == (const __base*)&__f.__buf_) + { + __f_ = (__base*)&__buf_; + __f.__f_->__clone(__f_); + } + else + __f_ = __f.__f_->__clone(); +} + +template +template +function<_Rp(_A0, _A1, _A2)>::function(_Fp __f, + typename enable_if::value>::type*) + : __f_(0) +{ + if (__function::__not_null(__f)) + { + typedef __function::__func<_Fp, allocator<_Fp>, _Rp(_A0, _A1, _A2)> _FF; + if (sizeof(_FF) <= sizeof(__buf_)) + { + __f_ = (__base*)&__buf_; + ::new (__f_) _FF(__f); + } + else + { + typedef allocator<_FF> _Ap; + _Ap __a; + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__base, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) _FF(__f, allocator<_Fp>(__a)); + __f_ = __hold.release(); + } + } +} + +template +template +function<_Rp(_A0, _A1, _A2)>::function(allocator_arg_t, const _Alloc& __a0, _Fp __f, + typename enable_if::value>::type*) + : __f_(0) +{ + typedef allocator_traits<_Alloc> __alloc_traits; + if (__function::__not_null(__f)) + { + typedef __function::__func<_Fp, _Alloc, _Rp(_A0, _A1, _A2)> _FF; + if (sizeof(_FF) <= sizeof(__buf_)) + { + __f_ = (__base*)&__buf_; + ::new (__f_) _FF(__f, __a0); + } + else + { + typedef typename __rebind_alloc_helper<__alloc_traits, _FF>::type _Ap; + _Ap __a(__a0); + typedef __allocator_destructor<_Ap> _Dp; + unique_ptr<__base, _Dp> __hold(__a.allocate(1), _Dp(__a, 1)); + ::new (__hold.get()) _FF(__f, _Alloc(__a)); + __f_ = __hold.release(); + } + } +} + +template +function<_Rp(_A0, _A1, _A2)>& +function<_Rp(_A0, _A1, _A2)>::operator=(const function& __f) +{ + if (__f) + function(__f).swap(*this); + else + *this = nullptr; + return *this; +} + +template +function<_Rp(_A0, _A1, _A2)>& +function<_Rp(_A0, _A1, _A2)>::operator=(nullptr_t) +{ + __base* __t = __f_; + __f_ = 0; + if (__t == (__base*)&__buf_) + __t->destroy(); + else if (__t) + __t->destroy_deallocate(); + return *this; +} + +template +template +typename enable_if +< + !is_integral<_Fp>::value, + function<_Rp(_A0, _A1, _A2)>& +>::type +function<_Rp(_A0, _A1, _A2)>::operator=(_Fp __f) +{ + function(_CUDA_VSTD::move(__f)).swap(*this); + return *this; +} + +template +function<_Rp(_A0, _A1, _A2)>::~function() +{ + if (__f_ == (__base*)&__buf_) + __f_->destroy(); + else if (__f_) + __f_->destroy_deallocate(); +} + +template +void +function<_Rp(_A0, _A1, _A2)>::swap(function& __f) +{ + if (_CUDA_VSTD::addressof(__f) == this) + return; + if (__f_ == (__base*)&__buf_ && __f.__f_ == (__base*)&__f.__buf_) + { + typename aligned_storage::type __tempbuf; + __base* __t = (__base*)&__tempbuf; + __f_->__clone(__t); + __f_->destroy(); + __f_ = 0; + __f.__f_->__clone((__base*)&__buf_); + __f.__f_->destroy(); + __f.__f_ = 0; + __f_ = (__base*)&__buf_; + __t->__clone((__base*)&__f.__buf_); + __t->destroy(); + __f.__f_ = (__base*)&__f.__buf_; + } + else if (__f_ == (__base*)&__buf_) + { + __f_->__clone((__base*)&__f.__buf_); + __f_->destroy(); + __f_ = __f.__f_; + __f.__f_ = (__base*)&__f.__buf_; + } + else if (__f.__f_ == (__base*)&__f.__buf_) + { + __f.__f_->__clone((__base*)&__buf_); + __f.__f_->destroy(); + __f.__f_ = __f_; + __f_ = (__base*)&__buf_; + } + else + _CUDA_VSTD::swap(__f_, __f.__f_); +} + +template +_Rp +function<_Rp(_A0, _A1, _A2)>::operator()(_A0 __a0, _A1 __a1, _A2 __a2) const +{ + if (__f_ == 0) + __throw_bad_function_call(); + return (*__f_)(__a0, __a1, __a2); +} + +#ifndef 
_LIBCUDACXX_NO_RTTI + +template +const std::type_info& +function<_Rp(_A0, _A1, _A2)>::target_type() const +{ + if (__f_ == 0) + return typeid(void); + return __f_->target_type(); +} + +template +template +_Tp* +function<_Rp(_A0, _A1, _A2)>::target() +{ + if (__f_ == 0) + return (_Tp*)0; + return (_Tp*) const_cast(__f_->target(typeid(_Tp))); +} + +template +template +const _Tp* +function<_Rp(_A0, _A1, _A2)>::target() const +{ + if (__f_ == 0) + return (const _Tp*)0; + return (const _Tp*)__f_->target(typeid(_Tp)); +} + +#endif // _LIBCUDACXX_NO_RTTI + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +operator==(const function<_Fp>& __f, nullptr_t) {return !__f;} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +operator==(nullptr_t, const function<_Fp>& __f) {return !__f;} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +operator!=(const function<_Fp>& __f, nullptr_t) {return (bool)__f;} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +operator!=(nullptr_t, const function<_Fp>& __f) {return (bool)__f;} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(function<_Fp>& __x, function<_Fp>& __y) +{return __x.swap(__y);} + +#include <__pragma_pop> + +#endif // _LIBCUDACXX_FUNCTIONAL_03 diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base new file mode 100644 index 000000000000..d6597340624a --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base @@ -0,0 +1,673 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX_FUNCTIONAL_BASE +#define _LIBCUDACXX_FUNCTIONAL_BASE + +#ifndef __cuda_std__ +#include <__config> +#include +#include +#include +#include +#include +#include <__pragma_push> +#endif //__cuda_std__ + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +template +struct _LIBCUDACXX_TEMPLATE_VIS binary_function +{ + typedef _Arg1 first_argument_type; + typedef _Arg2 second_argument_type; + typedef _Result result_type; +}; + +template +struct __has_result_type +{ +private: + struct __two {char __lx; char __lxx;}; + template _LIBCUDACXX_INLINE_VISIBILITY static __two __test(...); + template _LIBCUDACXX_INLINE_VISIBILITY static char __test(typename _Up::result_type* = 0); +public: + static const bool value = sizeof(__test<_Tp>(0)) == 1; +}; + +#if _LIBCUDACXX_STD_VER > 11 +template +#else +template +#endif +struct _LIBCUDACXX_TEMPLATE_VIS less : binary_function<_Tp, _Tp, bool> +{ + _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY + bool operator()(const _Tp& __x, const _Tp& __y) const + {return __x < __y;} +}; + +#if _LIBCUDACXX_STD_VER > 11 +template <> +struct _LIBCUDACXX_TEMPLATE_VIS less +{ + template + _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY + auto operator()(_T1&& __t, _T2&& __u) const + _NOEXCEPT_(noexcept(_CUDA_VSTD::forward<_T1>(__t) < _CUDA_VSTD::forward<_T2>(__u))) + -> decltype (_CUDA_VSTD::forward<_T1>(__t) < _CUDA_VSTD::forward<_T2>(__u)) + { return _CUDA_VSTD::forward<_T1>(__t) < _CUDA_VSTD::forward<_T2>(__u); } + typedef void is_transparent; +}; +#endif + +// __weak_result_type + +template +struct __derives_from_unary_function +{ +private: + struct __two {char __lx; char __lxx;}; + _LIBCUDACXX_INLINE_VISIBILITY + static __two __test(...); + template + _LIBCUDACXX_INLINE_VISIBILITY + static unary_function<_Ap, _Rp> + __test(const volatile unary_function<_Ap, _Rp>*); +public: + static const bool value = !is_same::value; + typedef decltype(__test((_Tp*)0)) type; +}; + +template +struct __derives_from_binary_function +{ +private: + struct __two {char __lx; char __lxx;}; + _LIBCUDACXX_INLINE_VISIBILITY + static __two __test(...); + template + _LIBCUDACXX_INLINE_VISIBILITY + static binary_function<_A1, _A2, _Rp> + __test(const volatile binary_function<_A1, _A2, _Rp>*); +public: + static const bool value = !is_same::value; + typedef decltype(__test((_Tp*)0)) type; +}; + +template ::value> +struct __maybe_derive_from_unary_function // bool is true + : public __derives_from_unary_function<_Tp>::type +{ +}; + +template +struct __maybe_derive_from_unary_function<_Tp, false> +{ +}; + +template ::value> +struct __maybe_derive_from_binary_function // bool is true + : public __derives_from_binary_function<_Tp>::type +{ +}; + +template +struct __maybe_derive_from_binary_function<_Tp, false> +{ +}; + +template ::value> +struct __weak_result_type_imp // bool is true + : public __maybe_derive_from_unary_function<_Tp>, + public __maybe_derive_from_binary_function<_Tp> +{ + typedef _LIBCUDACXX_NODEBUG_TYPE typename _Tp::result_type result_type; +}; + +template +struct __weak_result_type_imp<_Tp, false> + : public __maybe_derive_from_unary_function<_Tp>, + public __maybe_derive_from_binary_function<_Tp> +{ +}; + +template +struct __weak_result_type + : public __weak_result_type_imp<_Tp> +{ +}; + +// 0 argument case + 
+template +struct __weak_result_type<_Rp ()> +{ + typedef _LIBCUDACXX_NODEBUG_TYPE _Rp result_type; +}; + +template +struct __weak_result_type<_Rp (&)()> +{ + typedef _LIBCUDACXX_NODEBUG_TYPE _Rp result_type; +}; + +template +struct __weak_result_type<_Rp (*)()> +{ + typedef _LIBCUDACXX_NODEBUG_TYPE _Rp result_type; +}; + +// 1 argument case + +template +struct __weak_result_type<_Rp (_A1)> + : public unary_function<_A1, _Rp> +{ +}; + +template +struct __weak_result_type<_Rp (&)(_A1)> + : public unary_function<_A1, _Rp> +{ +}; + +template +struct __weak_result_type<_Rp (*)(_A1)> + : public unary_function<_A1, _Rp> +{ +}; + +template +struct __weak_result_type<_Rp (_Cp::*)()> + : public unary_function<_Cp*, _Rp> +{ +}; + +template +struct __weak_result_type<_Rp (_Cp::*)() const> + : public unary_function +{ +}; + +template +struct __weak_result_type<_Rp (_Cp::*)() volatile> + : public unary_function +{ +}; + +template +struct __weak_result_type<_Rp (_Cp::*)() const volatile> + : public unary_function +{ +}; + +// 2 argument case + +template +struct __weak_result_type<_Rp (_A1, _A2)> + : public binary_function<_A1, _A2, _Rp> +{ +}; + +template +struct __weak_result_type<_Rp (*)(_A1, _A2)> + : public binary_function<_A1, _A2, _Rp> +{ +}; + +template +struct __weak_result_type<_Rp (&)(_A1, _A2)> + : public binary_function<_A1, _A2, _Rp> +{ +}; + +template +struct __weak_result_type<_Rp (_Cp::*)(_A1)> + : public binary_function<_Cp*, _A1, _Rp> +{ +}; + +template +struct __weak_result_type<_Rp (_Cp::*)(_A1) const> + : public binary_function +{ +}; + +template +struct __weak_result_type<_Rp (_Cp::*)(_A1) volatile> + : public binary_function +{ +}; + +template +struct __weak_result_type<_Rp (_Cp::*)(_A1) const volatile> + : public binary_function +{ +}; + + +#ifndef _LIBCUDACXX_CXX03_LANG +// 3 or more arguments + +template +struct __weak_result_type<_Rp (_A1, _A2, _A3, _A4...)> +{ + typedef _Rp result_type; +}; + +template +struct __weak_result_type<_Rp (&)(_A1, _A2, _A3, _A4...)> +{ + typedef _Rp result_type; +}; + +template +struct __weak_result_type<_Rp (*)(_A1, _A2, _A3, _A4...)> +{ + typedef _Rp result_type; +}; + +template +struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...)> +{ + typedef _Rp result_type; +}; + +template +struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...) const> +{ + typedef _Rp result_type; +}; + +template +struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...) volatile> +{ + typedef _Rp result_type; +}; + +template +struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...) const volatile> +{ + typedef _Rp result_type; +}; + +template +struct __invoke_return +{ + typedef decltype(__invoke(_CUDA_VSTD::declval<_Tp>(), _CUDA_VSTD::declval<_Args>()...)) type; +}; + +#else // defined(_LIBCUDACXX_CXX03_LANG) + +#include <__functional_base_03> + +#endif // !defined(_LIBCUDACXX_CXX03_LANG) + + +template +struct __invoke_void_return_wrapper +{ +#ifndef _LIBCUDACXX_CXX03_LANG + template + _LIBCUDACXX_INLINE_VISIBILITY + static _Ret __call(_Args&&... 
__args) { + return __invoke(_CUDA_VSTD::forward<_Args>(__args)...); + } +#else + template + _LIBCUDACXX_INLINE_VISIBILITY + static _Ret __call(_Fn __f) { + return __invoke(__f); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + static _Ret __call(_Fn __f, _A0& __a0) { + return __invoke(__f, __a0); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + static _Ret __call(_Fn __f, _A0& __a0, _A1& __a1) { + return __invoke(__f, __a0, __a1); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + static _Ret __call(_Fn __f, _A0& __a0, _A1& __a1, _A2& __a2){ + return __invoke(__f, __a0, __a1, __a2); + } +#endif +}; + +template <> +struct __invoke_void_return_wrapper +{ +#ifndef _LIBCUDACXX_CXX03_LANG + template + _LIBCUDACXX_INLINE_VISIBILITY + static void __call(_Args&&... __args) { + __invoke(_CUDA_VSTD::forward<_Args>(__args)...); + } +#else + template + _LIBCUDACXX_INLINE_VISIBILITY + static void __call(_Fn __f) { + __invoke(__f); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + static void __call(_Fn __f, _A0& __a0) { + __invoke(__f, __a0); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + static void __call(_Fn __f, _A0& __a0, _A1& __a1) { + __invoke(__f, __a0, __a1); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + static void __call(_Fn __f, _A0& __a0, _A1& __a1, _A2& __a2) { + __invoke(__f, __a0, __a1, __a2); + } +#endif +}; + +template +class _LIBCUDACXX_TEMPLATE_VIS reference_wrapper + : public __weak_result_type<_Tp> +{ +public: + // types + typedef _Tp type; +private: + type* __f_; + +public: + // construct/copy/destroy + _LIBCUDACXX_INLINE_VISIBILITY reference_wrapper(type& __f) _NOEXCEPT + : __f_(_CUDA_VSTD::addressof(__f)) {} +#ifndef _LIBCUDACXX_CXX03_LANG + private: _LIBCUDACXX_INLINE_VISIBILITY reference_wrapper(type&&); public: // = delete; // do not bind to temps +#endif + + // access + _LIBCUDACXX_INLINE_VISIBILITY operator type& () const _NOEXCEPT {return *__f_;} + _LIBCUDACXX_INLINE_VISIBILITY type& get() const _NOEXCEPT {return *__f_;} + +#ifndef _LIBCUDACXX_CXX03_LANG + // invoke + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_of::type + operator() (_ArgTypes&&... 
__args) const { + return __invoke(get(), _CUDA_VSTD::forward<_ArgTypes>(__args)...); + } +#else + + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return::type + operator() () const { + return __invoke(get()); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return0::type + operator() (_A0& __a0) const { + return __invoke(get(), __a0); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return0::type + operator() (_A0 const& __a0) const { + return __invoke(get(), __a0); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return1::type + operator() (_A0& __a0, _A1& __a1) const { + return __invoke(get(), __a0, __a1); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return1::type + operator() (_A0 const& __a0, _A1& __a1) const { + return __invoke(get(), __a0, __a1); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return1::type + operator() (_A0& __a0, _A1 const& __a1) const { + return __invoke(get(), __a0, __a1); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return1::type + operator() (_A0 const& __a0, _A1 const& __a1) const { + return __invoke(get(), __a0, __a1); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return2::type + operator() (_A0& __a0, _A1& __a1, _A2& __a2) const { + return __invoke(get(), __a0, __a1, __a2); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return2::type + operator() (_A0 const& __a0, _A1& __a1, _A2& __a2) const { + return __invoke(get(), __a0, __a1, __a2); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return2::type + operator() (_A0& __a0, _A1 const& __a1, _A2& __a2) const { + return __invoke(get(), __a0, __a1, __a2); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return2::type + operator() (_A0& __a0, _A1& __a1, _A2 const& __a2) const { + return __invoke(get(), __a0, __a1, __a2); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return2::type + operator() (_A0 const& __a0, _A1 const& __a1, _A2& __a2) const { + return __invoke(get(), __a0, __a1, __a2); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return2::type + operator() (_A0 const& __a0, _A1& __a1, _A2 const& __a2) const { + return __invoke(get(), __a0, __a1, __a2); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return2::type + operator() (_A0& __a0, _A1 const& __a1, _A2 const& __a2) const { + return __invoke(get(), __a0, __a1, __a2); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename __invoke_return2::type + operator() (_A0 const& __a0, _A1 const& __a1, _A2 const& __a2) const { + return __invoke(get(), __a0, __a1, __a2); + } +#endif // _LIBCUDACXX_CXX03_LANG +}; + + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +reference_wrapper<_Tp> +ref(_Tp& __t) _NOEXCEPT +{ + return reference_wrapper<_Tp>(__t); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +reference_wrapper<_Tp> +ref(reference_wrapper<_Tp> __t) _NOEXCEPT +{ + return ref(__t.get()); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +reference_wrapper +cref(const _Tp& __t) _NOEXCEPT +{ + return reference_wrapper(__t); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +reference_wrapper +cref(reference_wrapper<_Tp> __t) _NOEXCEPT +{ + return cref(__t.get()); +} + +#ifndef _LIBCUDACXX_CXX03_LANG +template void ref(const _Tp&&) = delete; +template void cref(const _Tp&&) = delete; +#endif + +#if _LIBCUDACXX_STD_VER > 11 +template +struct __is_transparent : false_type 
{}; + +template +struct __is_transparent<_Tp, _Up, + typename __void_t::type> + : true_type {}; +#endif + +// allocator_arg_t + +struct _LIBCUDACXX_TEMPLATE_VIS allocator_arg_t { explicit allocator_arg_t() = default; }; + +#if defined(_LIBCUDACXX_CXX03_LANG) || defined(_LIBCUDACXX_BUILDING_LIBRARY) +extern _LIBCUDACXX_EXPORTED_FROM_ABI const allocator_arg_t allocator_arg; +#else +/* _LIBCUDACXX_INLINE_VAR */ constexpr allocator_arg_t allocator_arg = allocator_arg_t(); +#endif + +// uses_allocator + +template +struct __has_allocator_type +{ +private: + struct __two {char __lx; char __lxx;}; + template _LIBCUDACXX_INLINE_VISIBILITY static __two __test(...); + template _LIBCUDACXX_INLINE_VISIBILITY static char __test(typename _Up::allocator_type* = 0); +public: + static const bool value = sizeof(__test<_Tp>(0)) == 1; +}; + +template ::value> +struct __uses_allocator + : public integral_constant::value> +{ +}; + +template +struct __uses_allocator<_Tp, _Alloc, false> + : public false_type +{ +}; + +template +struct _LIBCUDACXX_TEMPLATE_VIS uses_allocator + : public __uses_allocator<_Tp, _Alloc> +{ +}; + +#if _LIBCUDACXX_STD_VER > 14 +template +_LIBCUDACXX_INLINE_VAR constexpr size_t uses_allocator_v = uses_allocator<_Tp, _Alloc>::value; +#endif + +#ifndef _LIBCUDACXX_CXX03_LANG + +// allocator construction + +template +struct __uses_alloc_ctor_imp +{ + typedef _LIBCUDACXX_NODEBUG_TYPE typename __uncvref<_Alloc>::type _RawAlloc; + static const bool __ua = uses_allocator<_Tp, _RawAlloc>::value; + static const bool __ic = + is_constructible<_Tp, allocator_arg_t, _Alloc, _Args...>::value; + static const int value = __ua ? 2 - __ic : 0; +}; + +template +struct __uses_alloc_ctor + : integral_constant::value> + {}; + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void __user_alloc_construct_impl (integral_constant, _Tp *__storage, const _Allocator &, _Args &&... __args ) +{ + new (__storage) _Tp (_CUDA_VSTD::forward<_Args>(__args)...); +} + +// FIXME: This should have a version which takes a non-const alloc. +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void __user_alloc_construct_impl (integral_constant, _Tp *__storage, const _Allocator &__a, _Args &&... __args ) +{ + new (__storage) _Tp (allocator_arg, __a, _CUDA_VSTD::forward<_Args>(__args)...); +} + +// FIXME: This should have a version which takes a non-const alloc. +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void __user_alloc_construct_impl (integral_constant, _Tp *__storage, const _Allocator &__a, _Args &&... __args ) +{ + new (__storage) _Tp (_CUDA_VSTD::forward<_Args>(__args)..., __a); +} + +#endif // _LIBCUDACXX_CXX03_LANG + +_LIBCUDACXX_END_NAMESPACE_STD + +#ifndef __cuda_std__ +#include <__pragma_pop> +#endif + +#endif // _LIBCUDACXX_FUNCTIONAL_BASE diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base_03 b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base_03 new file mode 100644 index 000000000000..4187e9f5cf1d --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base_03 @@ -0,0 +1,223 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX_FUNCTIONAL_BASE_03 +#define _LIBCUDACXX_FUNCTIONAL_BASE_03 + +// manual variadic expansion for + +// __invoke + +template +struct __enable_invoke_imp; + +template +struct __enable_invoke_imp<_Ret, _T1, true, true> { + typedef _Ret _Bullet1; + typedef _Bullet1 type; +}; + +template +struct __enable_invoke_imp<_Ret, _T1, true, false> { + typedef _Ret _Bullet2; + typedef _Bullet2 type; +}; + +template +struct __enable_invoke_imp<_Ret, _T1, false, true> { + typedef typename add_lvalue_reference< + typename __apply_cv<_T1, _Ret>::type + >::type _Bullet3; + typedef _Bullet3 type; +}; + +template +struct __enable_invoke_imp<_Ret, _T1, false, false> { + typedef typename add_lvalue_reference< + typename __apply_cv()), _Ret>::type + >::type _Bullet4; + typedef _Bullet4 type; +}; + +template +struct __enable_invoke_imp<_Ret, _T1*, false, false> { + typedef typename add_lvalue_reference< + typename __apply_cv<_T1, _Ret>::type + >::type _Bullet4; + typedef _Bullet4 type; +}; + +template , + class _Ret = typename _Traits::_ReturnType, + class _Class = typename _Traits::_ClassType> +struct __enable_invoke : __enable_invoke_imp< + _Ret, _T1, + is_member_function_pointer<_Fn>::value, + is_base_of<_Class, typename remove_reference<_T1>::type>::value> +{ +}; + +__nat __invoke(__any, ...); + +// first bullet + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __enable_invoke<_Fn, _T1>::_Bullet1 +__invoke(_Fn __f, _T1& __t1) { + return (__t1.*__f)(); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __enable_invoke<_Fn, _T1>::_Bullet1 +__invoke(_Fn __f, _T1& __t1, _A0& __a0) { + return (__t1.*__f)(__a0); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __enable_invoke<_Fn, _T1>::_Bullet1 +__invoke(_Fn __f, _T1& __t1, _A0& __a0, _A1& __a1) { + return (__t1.*__f)(__a0, __a1); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __enable_invoke<_Fn, _T1>::_Bullet1 +__invoke(_Fn __f, _T1& __t1, _A0& __a0, _A1& __a1, _A2& __a2) { + return (__t1.*__f)(__a0, __a1, __a2); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __enable_invoke<_Fn, _T1>::_Bullet2 +__invoke(_Fn __f, _T1& __t1) { + return ((*__t1).*__f)(); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __enable_invoke<_Fn, _T1>::_Bullet2 +__invoke(_Fn __f, _T1& __t1, _A0& __a0) { + return ((*__t1).*__f)(__a0); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __enable_invoke<_Fn, _T1>::_Bullet2 +__invoke(_Fn __f, _T1& __t1, _A0& __a0, _A1& __a1) { + return ((*__t1).*__f)(__a0, __a1); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __enable_invoke<_Fn, _T1>::_Bullet2 +__invoke(_Fn __f, _T1& __t1, _A0& __a0, _A1& __a1, _A2& __a2) { + return ((*__t1).*__f)(__a0, __a1, __a2); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __enable_invoke<_Fn, _T1>::_Bullet3 +__invoke(_Fn __f, _T1& __t1) { + return __t1.*__f; +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __enable_invoke<_Fn, _T1>::_Bullet4 +__invoke(_Fn __f, _T1& __t1) { + return (*__t1).*__f; +} + +// fifth bullet + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +decltype(_CUDA_VSTD::declval<_Fp&>()()) +__invoke(_Fp& __f) +{ + return __f(); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +decltype(_CUDA_VSTD::declval<_Fp&>()(_CUDA_VSTD::declval<_A0&>())) +__invoke(_Fp& __f, _A0& __a0) +{ + return __f(__a0); +} 
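+// Illustrative sketch only (hypothetical types, not part of the upstream
+// header): how the bullet overloads above dispatch, given some function
+// object __fn.
+//
+//   struct _St { int __f(int __x) { return __x; } int __m; };
+//   _St __s; _St* __p = &__s; int __a = 1;
+//   __invoke(&_St::__f, __s, __a);  // _Bullet1: (__s.*__f)(__a), on an object
+//   __invoke(&_St::__f, __p, __a);  // _Bullet2: ((*__p).*__f)(__a), via pointer
+//   __invoke(&_St::__m, __s);       // _Bullet3: __s.*__m, data member
+//   __invoke(&_St::__m, __p);       // _Bullet4: (*__p).*__m, via pointer
+//   __invoke(__fn, __a);            // fifth bullet: __fn(__a), plain functor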
+
+template <class _Fp, class _A0, class _A1>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+decltype(_CUDA_VSTD::declval<_Fp&>()(_CUDA_VSTD::declval<_A0&>(), _CUDA_VSTD::declval<_A1&>()))
+__invoke(_Fp& __f, _A0& __a0, _A1& __a1)
+{
+    return __f(__a0, __a1);
+}
+
+template <class _Fp, class _A0, class _A1, class _A2>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+decltype(_CUDA_VSTD::declval<_Fp&>()(_CUDA_VSTD::declval<_A0&>(), _CUDA_VSTD::declval<_A1&>(), _CUDA_VSTD::declval<_A2&>()))
+__invoke(_Fp& __f, _A0& __a0, _A1& __a1, _A2& __a2)
+{
+    return __f(__a0, __a1, __a2);
+}
+
+template <class _Fp, bool = __has_result_type<__weak_result_type<_Fp> >::value>
+struct __invoke_return
+{
+    typedef typename __weak_result_type<_Fp>::result_type type;
+};
+
+template <class _Fp>
+struct __invoke_return<_Fp, false>
+{
+    typedef decltype(__invoke(_CUDA_VSTD::declval<_Fp&>())) type;
+};
+
+template <class _Tp, class _A0>
+struct __invoke_return0
+{
+    typedef decltype(__invoke(_CUDA_VSTD::declval<_Tp&>(), _CUDA_VSTD::declval<_A0&>())) type;
+};
+
+template <class _Rp, class _Tp, class _A0>
+struct __invoke_return0<_Rp _Tp::*, _A0>
+{
+    typedef typename __enable_invoke<_Rp _Tp::*, _A0>::type type;
+};
+
+template <class _Tp, class _A0, class _A1>
+struct __invoke_return1
+{
+    typedef decltype(__invoke(_CUDA_VSTD::declval<_Tp&>(), _CUDA_VSTD::declval<_A0&>(),
+                                                           _CUDA_VSTD::declval<_A1&>())) type;
+};
+
+template <class _Rp, class _Class, class _A0, class _A1>
+struct __invoke_return1<_Rp _Class::*, _A0, _A1> {
+    typedef typename __enable_invoke<_Rp _Class::*, _A0>::type type;
+};
+
+template <class _Tp, class _A0, class _A1, class _A2>
+struct __invoke_return2
+{
+    typedef decltype(__invoke(_CUDA_VSTD::declval<_Tp&>(), _CUDA_VSTD::declval<_A0&>(),
+                                                           _CUDA_VSTD::declval<_A1&>(),
+                                                           _CUDA_VSTD::declval<_A2&>())) type;
+};
+
+template <class _Ret, class _Class, class _A0, class _A1, class _A2>
+struct __invoke_return2<_Ret _Class::*, _A0, _A1, _A2> {
+    typedef typename __enable_invoke<_Ret _Class::*, _A0>::type type;
+};
+#endif // _LIBCUDACXX_FUNCTIONAL_BASE_03
diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__hash_table b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__hash_table
new file mode 100644
index 000000000000..9399fc210d79
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__hash_table
@@ -0,0 +1,2914 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX__HASH_TABLE +#define _LIBCUDACXX__HASH_TABLE + +#include <__config> +#include +#include +#include +#include +#include +#include +#include + +#include <__debug> + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_PUSH_MACROS +#include <__undef_macros> + + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +template +struct __hash_value_type; + +#ifndef _LIBCUDACXX_CXX03_LANG +template +struct __is_hash_value_type_imp : false_type {}; + +template +struct __is_hash_value_type_imp<__hash_value_type<_Key, _Value>> : true_type {}; + +template +struct __is_hash_value_type : false_type {}; + +template +struct __is_hash_value_type<_One> : __is_hash_value_type_imp::type> {}; +#endif + +_LIBCUDACXX_FUNC_VIS +size_t __next_prime(size_t __n); + +template +struct __hash_node_base +{ + typedef typename pointer_traits<_NodePtr>::element_type __node_type; + typedef __hash_node_base __first_node; + typedef typename __rebind_pointer<_NodePtr, __first_node>::type __node_base_pointer; + typedef _NodePtr __node_pointer; + +#if defined(_LIBCUDACXX_ABI_FIX_UNORDERED_NODE_POINTER_UB) + typedef __node_base_pointer __next_pointer; +#else + typedef typename conditional< + is_pointer<__node_pointer>::value, + __node_base_pointer, + __node_pointer>::type __next_pointer; +#endif + + __next_pointer __next_; + + _LIBCUDACXX_INLINE_VISIBILITY + __next_pointer __ptr() _NOEXCEPT { + return static_cast<__next_pointer>( + pointer_traits<__node_base_pointer>::pointer_to(*this)); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __node_pointer __upcast() _NOEXCEPT { + return static_cast<__node_pointer>( + pointer_traits<__node_base_pointer>::pointer_to(*this)); + } + + _LIBCUDACXX_INLINE_VISIBILITY + size_t __hash() const _NOEXCEPT { + return static_cast<__node_type const&>(*this).__hash_; + } + + _LIBCUDACXX_INLINE_VISIBILITY __hash_node_base() _NOEXCEPT : __next_(nullptr) {} +}; + +template +struct __hash_node + : public __hash_node_base + < + typename __rebind_pointer<_VoidPtr, __hash_node<_Tp, _VoidPtr> >::type + > +{ + typedef _Tp __node_value_type; + + size_t __hash_; + __node_value_type __value_; +}; + +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +__is_hash_power2(size_t __bc) +{ + return __bc > 2 && !(__bc & (__bc - 1)); +} + +inline _LIBCUDACXX_INLINE_VISIBILITY +size_t +__constrain_hash(size_t __h, size_t __bc) +{ + return !(__bc & (__bc - 1)) ? __h & (__bc - 1) : + (__h < __bc ? __h : __h % __bc); +} + +inline _LIBCUDACXX_INLINE_VISIBILITY +size_t +__next_hash_pow2(size_t __n) +{ + return __n < 2 ? 
__n : (size_t(1) << (std::numeric_limits::digits - __libcpp_clz(__n-1))); +} + + +template class __hash_table; + +template class _LIBCUDACXX_TEMPLATE_VIS __hash_iterator; +template class _LIBCUDACXX_TEMPLATE_VIS __hash_const_iterator; +template class _LIBCUDACXX_TEMPLATE_VIS __hash_local_iterator; +template class _LIBCUDACXX_TEMPLATE_VIS __hash_const_local_iterator; +template class _LIBCUDACXX_TEMPLATE_VIS __hash_map_iterator; +template class _LIBCUDACXX_TEMPLATE_VIS __hash_map_const_iterator; + +template +struct __hash_key_value_types { + static_assert(!is_reference<_Tp>::value && !is_const<_Tp>::value, ""); + typedef _Tp key_type; + typedef _Tp __node_value_type; + typedef _Tp __container_value_type; + static const bool __is_map = false; + + _LIBCUDACXX_INLINE_VISIBILITY + static key_type const& __get_key(_Tp const& __v) { + return __v; + } + _LIBCUDACXX_INLINE_VISIBILITY + static __container_value_type const& __get_value(__node_value_type const& __v) { + return __v; + } + _LIBCUDACXX_INLINE_VISIBILITY + static __container_value_type* __get_ptr(__node_value_type& __n) { + return _CUDA_VSTD::addressof(__n); + } +#ifndef _LIBCUDACXX_CXX03_LANG + _LIBCUDACXX_INLINE_VISIBILITY + static __container_value_type&& __move(__node_value_type& __v) { + return _CUDA_VSTD::move(__v); + } +#endif +}; + +template +struct __hash_key_value_types<__hash_value_type<_Key, _Tp> > { + typedef _Key key_type; + typedef _Tp mapped_type; + typedef __hash_value_type<_Key, _Tp> __node_value_type; + typedef pair __container_value_type; + typedef __container_value_type __map_value_type; + static const bool __is_map = true; + + _LIBCUDACXX_INLINE_VISIBILITY + static key_type const& __get_key(__container_value_type const& __v) { + return __v.first; + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + static typename enable_if<__is_same_uncvref<_Up, __node_value_type>::value, + __container_value_type const&>::type + __get_value(_Up& __t) { + return __t.__get_value(); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + static typename enable_if<__is_same_uncvref<_Up, __container_value_type>::value, + __container_value_type const&>::type + __get_value(_Up& __t) { + return __t; + } + + _LIBCUDACXX_INLINE_VISIBILITY + static __container_value_type* __get_ptr(__node_value_type& __n) { + return _CUDA_VSTD::addressof(__n.__get_value()); + } +#ifndef _LIBCUDACXX_CXX03_LANG + _LIBCUDACXX_INLINE_VISIBILITY + static pair __move(__node_value_type& __v) { + return __v.__move(); + } +#endif + +}; + +template , + bool = _KVTypes::__is_map> +struct __hash_map_pointer_types {}; + +template +struct __hash_map_pointer_types<_Tp, _AllocPtr, _KVTypes, true> { + typedef typename _KVTypes::__map_value_type _Mv; + typedef typename __rebind_pointer<_AllocPtr, _Mv>::type + __map_value_type_pointer; + typedef typename __rebind_pointer<_AllocPtr, const _Mv>::type + __const_map_value_type_pointer; +}; + +template ::element_type> +struct __hash_node_types; + +template +struct __hash_node_types<_NodePtr, __hash_node<_Tp, _VoidPtr> > + : public __hash_key_value_types<_Tp>, __hash_map_pointer_types<_Tp, _VoidPtr> + +{ + typedef __hash_key_value_types<_Tp> __base; + +public: + typedef ptrdiff_t difference_type; + typedef size_t size_type; + + typedef typename __rebind_pointer<_NodePtr, void>::type __void_pointer; + + typedef typename pointer_traits<_NodePtr>::element_type __node_type; + typedef _NodePtr __node_pointer; + + typedef __hash_node_base<__node_pointer> __node_base_type; + typedef typename __rebind_pointer<_NodePtr, __node_base_type>::type + 
__node_base_pointer; + + typedef typename __node_base_type::__next_pointer __next_pointer; + + typedef _Tp __node_value_type; + typedef typename __rebind_pointer<_VoidPtr, __node_value_type>::type + __node_value_type_pointer; + typedef typename __rebind_pointer<_VoidPtr, const __node_value_type>::type + __const_node_value_type_pointer; + +private: + static_assert(!is_const<__node_type>::value, + "_NodePtr should never be a pointer to const"); + static_assert((is_same::element_type, void>::value), + "_VoidPtr does not point to unqualified void type"); + static_assert((is_same::type, + _NodePtr>::value), "_VoidPtr does not rebind to _NodePtr."); +}; + +template +struct __hash_node_types_from_iterator; +template +struct __hash_node_types_from_iterator<__hash_iterator<_NodePtr> > : __hash_node_types<_NodePtr> {}; +template +struct __hash_node_types_from_iterator<__hash_const_iterator<_NodePtr> > : __hash_node_types<_NodePtr> {}; +template +struct __hash_node_types_from_iterator<__hash_local_iterator<_NodePtr> > : __hash_node_types<_NodePtr> {}; +template +struct __hash_node_types_from_iterator<__hash_const_local_iterator<_NodePtr> > : __hash_node_types<_NodePtr> {}; + + +template +struct __make_hash_node_types { + typedef __hash_node<_NodeValueTp, _VoidPtr> _NodeTp; + typedef typename __rebind_pointer<_VoidPtr, _NodeTp>::type _NodePtr; + typedef __hash_node_types<_NodePtr> type; +}; + +template +class _LIBCUDACXX_TEMPLATE_VIS __hash_iterator +{ + typedef __hash_node_types<_NodePtr> _NodeTypes; + typedef _NodePtr __node_pointer; + typedef typename _NodeTypes::__next_pointer __next_pointer; + + __next_pointer __node_; + +public: + typedef forward_iterator_tag iterator_category; + typedef typename _NodeTypes::__node_value_type value_type; + typedef typename _NodeTypes::difference_type difference_type; + typedef value_type& reference; + typedef typename _NodeTypes::__node_value_type_pointer pointer; + + _LIBCUDACXX_INLINE_VISIBILITY __hash_iterator() _NOEXCEPT : __node_(nullptr) { + _LIBCUDACXX_DEBUG_MODE(__get_db()->__insert_i(this)); + } + +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_INLINE_VISIBILITY + __hash_iterator(const __hash_iterator& __i) + : __node_(__i.__node_) + { + __get_db()->__iterator_copy(this, &__i); + } + + _LIBCUDACXX_INLINE_VISIBILITY + ~__hash_iterator() + { + __get_db()->__erase_i(this); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_iterator& operator=(const __hash_iterator& __i) + { + if (this != &__i) + { + __get_db()->__iterator_copy(this, &__i); + __node_ = __i.__node_; + } + return *this; + } +#endif // _LIBCUDACXX_DEBUG_LEVEL >= 2 + + _LIBCUDACXX_INLINE_VISIBILITY + reference operator*() const { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to dereference a non-dereferenceable unordered container iterator"); + return __node_->__upcast()->__value_; + } + + _LIBCUDACXX_INLINE_VISIBILITY + pointer operator->() const { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to dereference a non-dereferenceable unordered container iterator"); + return pointer_traits::pointer_to(__node_->__upcast()->__value_); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_iterator& operator++() { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to increment non-incrementable unordered container iterator"); + __node_ = __node_->__next_; + return *this; + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_iterator operator++(int) + { + __hash_iterator __t(*this); + ++(*this); + return __t; + } + + 
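+    // Note (a sketch of the traversal model, not upstream text): every element
+    // sits on one singly linked list threaded through all buckets, so the
+    // operator++ above only follows __next_. With __constrain_hash defined
+    // earlier in this header, a power-of-two bucket_count() of 8 maps a hash
+    // __h to (__h & 7), while a non-power-of-two count such as 7 takes the
+    // modulo path, (__h % 7).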
friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator==(const __hash_iterator& __x, const __hash_iterator& __y) + { + return __x.__node_ == __y.__node_; + } + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator!=(const __hash_iterator& __x, const __hash_iterator& __y) + {return !(__x == __y);} + +private: +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_INLINE_VISIBILITY + __hash_iterator(__next_pointer __node, const void* __c) _NOEXCEPT + : __node_(__node) + { + __get_db()->__insert_ic(this, __c); + } +#else + _LIBCUDACXX_INLINE_VISIBILITY + __hash_iterator(__next_pointer __node) _NOEXCEPT + : __node_(__node) + {} +#endif + template friend class __hash_table; + template friend class _LIBCUDACXX_TEMPLATE_VIS __hash_const_iterator; + template friend class _LIBCUDACXX_TEMPLATE_VIS __hash_map_iterator; + template friend class _LIBCUDACXX_TEMPLATE_VIS unordered_map; + template friend class _LIBCUDACXX_TEMPLATE_VIS unordered_multimap; +}; + +template +class _LIBCUDACXX_TEMPLATE_VIS __hash_const_iterator +{ + static_assert(!is_const::element_type>::value, ""); + typedef __hash_node_types<_NodePtr> _NodeTypes; + typedef _NodePtr __node_pointer; + typedef typename _NodeTypes::__next_pointer __next_pointer; + + __next_pointer __node_; + +public: + typedef __hash_iterator<_NodePtr> __non_const_iterator; + + typedef forward_iterator_tag iterator_category; + typedef typename _NodeTypes::__node_value_type value_type; + typedef typename _NodeTypes::difference_type difference_type; + typedef const value_type& reference; + typedef typename _NodeTypes::__const_node_value_type_pointer pointer; + + + _LIBCUDACXX_INLINE_VISIBILITY __hash_const_iterator() _NOEXCEPT : __node_(nullptr) { + _LIBCUDACXX_DEBUG_MODE(__get_db()->__insert_i(this)); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_iterator(const __non_const_iterator& __x) _NOEXCEPT + : __node_(__x.__node_) + { + _LIBCUDACXX_DEBUG_MODE(__get_db()->__iterator_copy(this, &__x)); + } + +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_iterator(const __hash_const_iterator& __i) + : __node_(__i.__node_) + { + __get_db()->__iterator_copy(this, &__i); + } + + _LIBCUDACXX_INLINE_VISIBILITY + ~__hash_const_iterator() + { + __get_db()->__erase_i(this); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_iterator& operator=(const __hash_const_iterator& __i) + { + if (this != &__i) + { + __get_db()->__iterator_copy(this, &__i); + __node_ = __i.__node_; + } + return *this; + } +#endif // _LIBCUDACXX_DEBUG_LEVEL >= 2 + + _LIBCUDACXX_INLINE_VISIBILITY + reference operator*() const { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to dereference a non-dereferenceable unordered container const_iterator"); + return __node_->__upcast()->__value_; + } + _LIBCUDACXX_INLINE_VISIBILITY + pointer operator->() const { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to dereference a non-dereferenceable unordered container const_iterator"); + return pointer_traits::pointer_to(__node_->__upcast()->__value_); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_iterator& operator++() { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to increment non-incrementable unordered container const_iterator"); + __node_ = __node_->__next_; + return *this; + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_iterator operator++(int) + { + __hash_const_iterator __t(*this); + ++(*this); + return __t; + } + + friend _LIBCUDACXX_INLINE_VISIBILITY + bool 
operator==(const __hash_const_iterator& __x, const __hash_const_iterator& __y) + { + return __x.__node_ == __y.__node_; + } + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator!=(const __hash_const_iterator& __x, const __hash_const_iterator& __y) + {return !(__x == __y);} + +private: +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_iterator(__next_pointer __node, const void* __c) _NOEXCEPT + : __node_(__node) + { + __get_db()->__insert_ic(this, __c); + } +#else + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_iterator(__next_pointer __node) _NOEXCEPT + : __node_(__node) + {} +#endif + template friend class __hash_table; + template friend class _LIBCUDACXX_TEMPLATE_VIS __hash_map_const_iterator; + template friend class _LIBCUDACXX_TEMPLATE_VIS unordered_map; + template friend class _LIBCUDACXX_TEMPLATE_VIS unordered_multimap; +}; + +template +class _LIBCUDACXX_TEMPLATE_VIS __hash_local_iterator +{ + typedef __hash_node_types<_NodePtr> _NodeTypes; + typedef _NodePtr __node_pointer; + typedef typename _NodeTypes::__next_pointer __next_pointer; + + __next_pointer __node_; + size_t __bucket_; + size_t __bucket_count_; + +public: + typedef forward_iterator_tag iterator_category; + typedef typename _NodeTypes::__node_value_type value_type; + typedef typename _NodeTypes::difference_type difference_type; + typedef value_type& reference; + typedef typename _NodeTypes::__node_value_type_pointer pointer; + + _LIBCUDACXX_INLINE_VISIBILITY __hash_local_iterator() _NOEXCEPT : __node_(nullptr) { + _LIBCUDACXX_DEBUG_MODE(__get_db()->__insert_i(this)); + } + +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_INLINE_VISIBILITY + __hash_local_iterator(const __hash_local_iterator& __i) + : __node_(__i.__node_), + __bucket_(__i.__bucket_), + __bucket_count_(__i.__bucket_count_) + { + __get_db()->__iterator_copy(this, &__i); + } + + _LIBCUDACXX_INLINE_VISIBILITY + ~__hash_local_iterator() + { + __get_db()->__erase_i(this); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_local_iterator& operator=(const __hash_local_iterator& __i) + { + if (this != &__i) + { + __get_db()->__iterator_copy(this, &__i); + __node_ = __i.__node_; + __bucket_ = __i.__bucket_; + __bucket_count_ = __i.__bucket_count_; + } + return *this; + } +#endif // _LIBCUDACXX_DEBUG_LEVEL >= 2 + + _LIBCUDACXX_INLINE_VISIBILITY + reference operator*() const { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to dereference a non-dereferenceable unordered container local_iterator"); + return __node_->__upcast()->__value_; + } + + _LIBCUDACXX_INLINE_VISIBILITY + pointer operator->() const { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to dereference a non-dereferenceable unordered container local_iterator"); + return pointer_traits::pointer_to(__node_->__upcast()->__value_); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_local_iterator& operator++() { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to increment non-incrementable unordered container local_iterator"); + __node_ = __node_->__next_; + if (__node_ != nullptr && __constrain_hash(__node_->__hash(), __bucket_count_) != __bucket_) + __node_ = nullptr; + return *this; + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_local_iterator operator++(int) + { + __hash_local_iterator __t(*this); + ++(*this); + return __t; + } + + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator==(const __hash_local_iterator& __x, const __hash_local_iterator& __y) + { + return __x.__node_ == 
__y.__node_; + } + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator!=(const __hash_local_iterator& __x, const __hash_local_iterator& __y) + {return !(__x == __y);} + +private: +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_INLINE_VISIBILITY + __hash_local_iterator(__next_pointer __node, size_t __bucket, + size_t __bucket_count, const void* __c) _NOEXCEPT + : __node_(__node), + __bucket_(__bucket), + __bucket_count_(__bucket_count) + { + __get_db()->__insert_ic(this, __c); + if (__node_ != nullptr) + __node_ = __node_->__next_; + } +#else + _LIBCUDACXX_INLINE_VISIBILITY + __hash_local_iterator(__next_pointer __node, size_t __bucket, + size_t __bucket_count) _NOEXCEPT + : __node_(__node), + __bucket_(__bucket), + __bucket_count_(__bucket_count) + { + if (__node_ != nullptr) + __node_ = __node_->__next_; + } +#endif + template friend class __hash_table; + template friend class _LIBCUDACXX_TEMPLATE_VIS __hash_const_local_iterator; + template friend class _LIBCUDACXX_TEMPLATE_VIS __hash_map_iterator; +}; + +template +class _LIBCUDACXX_TEMPLATE_VIS __hash_const_local_iterator +{ + typedef __hash_node_types<_ConstNodePtr> _NodeTypes; + typedef _ConstNodePtr __node_pointer; + typedef typename _NodeTypes::__next_pointer __next_pointer; + + __next_pointer __node_; + size_t __bucket_; + size_t __bucket_count_; + + typedef pointer_traits<__node_pointer> __pointer_traits; + typedef typename __pointer_traits::element_type __node; + typedef typename remove_const<__node>::type __non_const_node; + typedef typename __rebind_pointer<__node_pointer, __non_const_node>::type + __non_const_node_pointer; +public: + typedef __hash_local_iterator<__non_const_node_pointer> + __non_const_iterator; + + typedef forward_iterator_tag iterator_category; + typedef typename _NodeTypes::__node_value_type value_type; + typedef typename _NodeTypes::difference_type difference_type; + typedef const value_type& reference; + typedef typename _NodeTypes::__const_node_value_type_pointer pointer; + + + _LIBCUDACXX_INLINE_VISIBILITY __hash_const_local_iterator() _NOEXCEPT : __node_(nullptr) { + _LIBCUDACXX_DEBUG_MODE(__get_db()->__insert_i(this)); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_local_iterator(const __non_const_iterator& __x) _NOEXCEPT + : __node_(__x.__node_), + __bucket_(__x.__bucket_), + __bucket_count_(__x.__bucket_count_) + { + _LIBCUDACXX_DEBUG_MODE(__get_db()->__iterator_copy(this, &__x)); + } + +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_local_iterator(const __hash_const_local_iterator& __i) + : __node_(__i.__node_), + __bucket_(__i.__bucket_), + __bucket_count_(__i.__bucket_count_) + { + __get_db()->__iterator_copy(this, &__i); + } + + _LIBCUDACXX_INLINE_VISIBILITY + ~__hash_const_local_iterator() + { + __get_db()->__erase_i(this); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_local_iterator& operator=(const __hash_const_local_iterator& __i) + { + if (this != &__i) + { + __get_db()->__iterator_copy(this, &__i); + __node_ = __i.__node_; + __bucket_ = __i.__bucket_; + __bucket_count_ = __i.__bucket_count_; + } + return *this; + } +#endif // _LIBCUDACXX_DEBUG_LEVEL >= 2 + + _LIBCUDACXX_INLINE_VISIBILITY + reference operator*() const { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to dereference a non-dereferenceable unordered container const_local_iterator"); + return __node_->__upcast()->__value_; + } + + _LIBCUDACXX_INLINE_VISIBILITY + pointer operator->() const { + 
_LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to dereference a non-dereferenceable unordered container const_local_iterator"); + return pointer_traits::pointer_to(__node_->__upcast()->__value_); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_local_iterator& operator++() { + _LIBCUDACXX_DEBUG_ASSERT(__get_const_db()->__dereferenceable(this), + "Attempted to increment non-incrementable unordered container const_local_iterator"); + __node_ = __node_->__next_; + if (__node_ != nullptr && __constrain_hash(__node_->__hash(), __bucket_count_) != __bucket_) + __node_ = nullptr; + return *this; + } + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_local_iterator operator++(int) + { + __hash_const_local_iterator __t(*this); + ++(*this); + return __t; + } + + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator==(const __hash_const_local_iterator& __x, const __hash_const_local_iterator& __y) + { + return __x.__node_ == __y.__node_; + } + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator!=(const __hash_const_local_iterator& __x, const __hash_const_local_iterator& __y) + {return !(__x == __y);} + +private: +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_local_iterator(__next_pointer __node, size_t __bucket, + size_t __bucket_count, const void* __c) _NOEXCEPT + : __node_(__node), + __bucket_(__bucket), + __bucket_count_(__bucket_count) + { + __get_db()->__insert_ic(this, __c); + if (__node_ != nullptr) + __node_ = __node_->__next_; + } +#else + _LIBCUDACXX_INLINE_VISIBILITY + __hash_const_local_iterator(__next_pointer __node, size_t __bucket, + size_t __bucket_count) _NOEXCEPT + : __node_(__node), + __bucket_(__bucket), + __bucket_count_(__bucket_count) + { + if (__node_ != nullptr) + __node_ = __node_->__next_; + } +#endif + template friend class __hash_table; + template friend class _LIBCUDACXX_TEMPLATE_VIS __hash_map_const_iterator; +}; + +template +class __bucket_list_deallocator +{ + typedef _Alloc allocator_type; + typedef allocator_traits __alloc_traits; + typedef typename __alloc_traits::size_type size_type; + + __compressed_pair __data_; +public: + typedef typename __alloc_traits::pointer pointer; + + _LIBCUDACXX_INLINE_VISIBILITY + __bucket_list_deallocator() + _NOEXCEPT_(is_nothrow_default_constructible::value) + : __data_(0) {} + + _LIBCUDACXX_INLINE_VISIBILITY + __bucket_list_deallocator(const allocator_type& __a, size_type __size) + _NOEXCEPT_(is_nothrow_copy_constructible::value) + : __data_(__size, __a) {} + +#ifndef _LIBCUDACXX_CXX03_LANG + _LIBCUDACXX_INLINE_VISIBILITY + __bucket_list_deallocator(__bucket_list_deallocator&& __x) + _NOEXCEPT_(is_nothrow_move_constructible::value) + : __data_(_CUDA_VSTD::move(__x.__data_)) + { + __x.size() = 0; + } +#endif + + _LIBCUDACXX_INLINE_VISIBILITY + size_type& size() _NOEXCEPT {return __data_.first();} + _LIBCUDACXX_INLINE_VISIBILITY + size_type size() const _NOEXCEPT {return __data_.first();} + + _LIBCUDACXX_INLINE_VISIBILITY + allocator_type& __alloc() _NOEXCEPT {return __data_.second();} + _LIBCUDACXX_INLINE_VISIBILITY + const allocator_type& __alloc() const _NOEXCEPT {return __data_.second();} + + _LIBCUDACXX_INLINE_VISIBILITY + void operator()(pointer __p) _NOEXCEPT + { + __alloc_traits::deallocate(__alloc(), __p, size()); + } +}; + +template class __hash_map_node_destructor; + +template +class __hash_node_destructor +{ + typedef _Alloc allocator_type; + typedef allocator_traits __alloc_traits; + +public: + typedef typename __alloc_traits::pointer pointer; +private: 
+    typedef __hash_node_types<pointer> _NodeTypes;
+
+    allocator_type& __na_;
+
+public:
+    bool __value_constructed;
+
+    __hash_node_destructor(__hash_node_destructor const&) = default;
+    __hash_node_destructor& operator=(const __hash_node_destructor&) = delete;
+
+    _LIBCUDACXX_INLINE_VISIBILITY
+    explicit __hash_node_destructor(allocator_type& __na,
+                                    bool __constructed = false) _NOEXCEPT
+        : __na_(__na),
+          __value_constructed(__constructed)
+        {}
+
+    _LIBCUDACXX_INLINE_VISIBILITY
+    void operator()(pointer __p) _NOEXCEPT
+    {
+        if (__value_constructed)
+            __alloc_traits::destroy(__na_, _NodeTypes::__get_ptr(__p->__value_));
+        if (__p)
+            __alloc_traits::deallocate(__na_, __p, 1);
+    }
+
+    template <class> friend class __hash_map_node_destructor;
+};
+
+#if _LIBCUDACXX_STD_VER > 14
+template <class _NodeType, class _Alloc>
+struct __generic_container_node_destructor;
+
+template <class _Tp, class _VoidPtr, class _Alloc>
+struct __generic_container_node_destructor<__hash_node<_Tp, _VoidPtr>, _Alloc>
+    : __hash_node_destructor<_Alloc>
+{
+    using __hash_node_destructor<_Alloc>::__hash_node_destructor;
+};
+#endif
+
+template <class _Key, class _Hash, class _Equal>
+struct __enforce_unordered_container_requirements {
+#ifndef _LIBCUDACXX_CXX03_LANG
+    static_assert(__check_hash_requirements<_Key, _Hash>::value,
+    "the specified hash does not meet the Hash requirements");
+    static_assert(is_copy_constructible<_Equal>::value,
+    "the specified comparator is required to be copy constructible");
+#endif
+    typedef int type;
+};
+
+template <class _Key, class _Hash, class _Equal>
+#ifndef _LIBCUDACXX_CXX03_LANG
+    _LIBCUDACXX_DIAGNOSE_WARNING(!__invokable<_Equal const&, _Key const&, _Key const&>::value,
+    "the specified comparator type does not provide a viable const call operator")
+    _LIBCUDACXX_DIAGNOSE_WARNING(!__invokable<_Hash const&, _Key const&>::value,
+    "the specified hash functor does not provide a viable const call operator")
+#endif
+typename __enforce_unordered_container_requirements<_Key, _Hash, _Equal>::type
+__diagnose_unordered_container_requirements(int);
+
+// This dummy overload is used so that the compiler won't emit a spurious
+// "no matching function for call to __diagnose_unordered_xxx" diagnostic
+// when the overload above causes a hard error.
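+// A minimal sketch of this two-overload pattern, with hypothetical names for
+// illustration only:
+//
+//   template <class _Tp> typename __checks<_Tp>::type __diagnose(int);   // may hard-error
+//   template <class _Tp> int                          __diagnose(void*); // always viable
+//
+// A call written as __diagnose<_Tp>(0) prefers the int overload, and the
+// void* fallback keeps a second "no matching function" diagnostic from being
+// emitted when the first overload's return type fails to instantiate.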
+template <class _Key, class _Hash, class _Equal>
+int __diagnose_unordered_container_requirements(void*);
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+class __hash_table
+{
+public:
+    typedef _Tp    value_type;
+    typedef _Hash  hasher;
+    typedef _Equal key_equal;
+    typedef _Alloc allocator_type;
+
+private:
+    typedef allocator_traits<allocator_type> __alloc_traits;
+    typedef typename
+      __make_hash_node_types<value_type, typename __alloc_traits::void_pointer>::type
+                                                                     _NodeTypes;
+public:
+
+    typedef typename _NodeTypes::__node_value_type           __node_value_type;
+    typedef typename _NodeTypes::__container_value_type      __container_value_type;
+    typedef typename _NodeTypes::key_type                    key_type;
+    typedef value_type&                              reference;
+    typedef const value_type&                        const_reference;
+    typedef typename __alloc_traits::pointer         pointer;
+    typedef typename __alloc_traits::const_pointer   const_pointer;
+#ifndef _LIBCUDACXX_ABI_FIX_UNORDERED_CONTAINER_SIZE_TYPE
+    typedef typename __alloc_traits::size_type       size_type;
+#else
+    typedef typename _NodeTypes::size_type           size_type;
+#endif
+    typedef typename _NodeTypes::difference_type     difference_type;
+public:
+    // Create __node
+
+    typedef typename _NodeTypes::__node_type __node;
+    typedef typename __rebind_alloc_helper<__alloc_traits, __node>::type __node_allocator;
+    typedef allocator_traits<__node_allocator>       __node_traits;
+    typedef typename _NodeTypes::__void_pointer      __void_pointer;
+    typedef typename _NodeTypes::__node_pointer      __node_pointer;
+    typedef typename _NodeTypes::__node_pointer      __node_const_pointer;
+    typedef typename _NodeTypes::__node_base_type    __first_node;
+    typedef typename _NodeTypes::__node_base_pointer __node_base_pointer;
+    typedef typename _NodeTypes::__next_pointer      __next_pointer;
+
+private:
+    // check for sane allocator pointer rebinding semantics. Rebinding the
+    // allocator for a new pointer type should be exactly the same as rebinding
+    // the pointer using 'pointer_traits'.
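+    // For example (illustrative only): for an allocator _Ap<_Tp> whose pointer
+    // type is a fancy _Ptr<_Tp>, both routes below must name _Ptr<__node>:
+    //
+    //   allocator_traits<_Ap<_Tp>>::rebind_alloc<__node>::pointer
+    //   pointer_traits<_Ptr<_Tp>>::rebind<__node>
+    //
+    // which is what the static_asserts below check through __node_pointer.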
+ static_assert((is_same<__node_pointer, typename __node_traits::pointer>::value), + "Allocator does not rebind pointers in a sane manner."); + typedef typename __rebind_alloc_helper<__node_traits, __first_node>::type + __node_base_allocator; + typedef allocator_traits<__node_base_allocator> __node_base_traits; + static_assert((is_same<__node_base_pointer, typename __node_base_traits::pointer>::value), + "Allocator does not rebind pointers in a sane manner."); + +private: + + typedef typename __rebind_alloc_helper<__node_traits, __next_pointer>::type __pointer_allocator; + typedef __bucket_list_deallocator<__pointer_allocator> __bucket_list_deleter; + typedef unique_ptr<__next_pointer[], __bucket_list_deleter> __bucket_list; + typedef allocator_traits<__pointer_allocator> __pointer_alloc_traits; + typedef typename __bucket_list_deleter::pointer __node_pointer_pointer; + + // --- Member data begin --- + __bucket_list __bucket_list_; + __compressed_pair<__first_node, __node_allocator> __p1_; + __compressed_pair __p2_; + __compressed_pair __p3_; + // --- Member data end --- + + _LIBCUDACXX_INLINE_VISIBILITY + size_type& size() _NOEXCEPT {return __p2_.first();} +public: + _LIBCUDACXX_INLINE_VISIBILITY + size_type size() const _NOEXCEPT {return __p2_.first();} + + _LIBCUDACXX_INLINE_VISIBILITY + hasher& hash_function() _NOEXCEPT {return __p2_.second();} + _LIBCUDACXX_INLINE_VISIBILITY + const hasher& hash_function() const _NOEXCEPT {return __p2_.second();} + + _LIBCUDACXX_INLINE_VISIBILITY + float& max_load_factor() _NOEXCEPT {return __p3_.first();} + _LIBCUDACXX_INLINE_VISIBILITY + float max_load_factor() const _NOEXCEPT {return __p3_.first();} + + _LIBCUDACXX_INLINE_VISIBILITY + key_equal& key_eq() _NOEXCEPT {return __p3_.second();} + _LIBCUDACXX_INLINE_VISIBILITY + const key_equal& key_eq() const _NOEXCEPT {return __p3_.second();} + + _LIBCUDACXX_INLINE_VISIBILITY + __node_allocator& __node_alloc() _NOEXCEPT {return __p1_.second();} + _LIBCUDACXX_INLINE_VISIBILITY + const __node_allocator& __node_alloc() const _NOEXCEPT + {return __p1_.second();} + +public: + typedef __hash_iterator<__node_pointer> iterator; + typedef __hash_const_iterator<__node_pointer> const_iterator; + typedef __hash_local_iterator<__node_pointer> local_iterator; + typedef __hash_const_local_iterator<__node_pointer> const_local_iterator; + + _LIBCUDACXX_INLINE_VISIBILITY + __hash_table() + _NOEXCEPT_( + is_nothrow_default_constructible<__bucket_list>::value && + is_nothrow_default_constructible<__first_node>::value && + is_nothrow_default_constructible<__node_allocator>::value && + is_nothrow_default_constructible::value && + is_nothrow_default_constructible::value); + _LIBCUDACXX_INLINE_VISIBILITY + __hash_table(const hasher& __hf, const key_equal& __eql); + __hash_table(const hasher& __hf, const key_equal& __eql, + const allocator_type& __a); + explicit __hash_table(const allocator_type& __a); + __hash_table(const __hash_table& __u); + __hash_table(const __hash_table& __u, const allocator_type& __a); +#ifndef _LIBCUDACXX_CXX03_LANG + __hash_table(__hash_table&& __u) + _NOEXCEPT_( + is_nothrow_move_constructible<__bucket_list>::value && + is_nothrow_move_constructible<__first_node>::value && + is_nothrow_move_constructible<__node_allocator>::value && + is_nothrow_move_constructible::value && + is_nothrow_move_constructible::value); + __hash_table(__hash_table&& __u, const allocator_type& __a); +#endif // _LIBCUDACXX_CXX03_LANG + ~__hash_table(); + + __hash_table& operator=(const __hash_table& __u); +#ifndef 
_LIBCUDACXX_CXX03_LANG + _LIBCUDACXX_INLINE_VISIBILITY + __hash_table& operator=(__hash_table&& __u) + _NOEXCEPT_( + __node_traits::propagate_on_container_move_assignment::value && + is_nothrow_move_assignable<__node_allocator>::value && + is_nothrow_move_assignable::value && + is_nothrow_move_assignable::value); +#endif + template + void __assign_unique(_InputIterator __first, _InputIterator __last); + template + void __assign_multi(_InputIterator __first, _InputIterator __last); + + _LIBCUDACXX_INLINE_VISIBILITY + size_type max_size() const _NOEXCEPT + { + return std::min( + __node_traits::max_size(__node_alloc()), + numeric_limits::max() + ); + } + +private: + _LIBCUDACXX_INLINE_VISIBILITY + __next_pointer __node_insert_multi_prepare(size_t __cp_hash, + value_type& __cp_val); + _LIBCUDACXX_INLINE_VISIBILITY + void __node_insert_multi_perform(__node_pointer __cp, + __next_pointer __pn) _NOEXCEPT; + + _LIBCUDACXX_INLINE_VISIBILITY + __next_pointer __node_insert_unique_prepare(size_t __nd_hash, + value_type& __nd_val); + _LIBCUDACXX_INLINE_VISIBILITY + void __node_insert_unique_perform(__node_pointer __ptr) _NOEXCEPT; + +public: + _LIBCUDACXX_INLINE_VISIBILITY + pair __node_insert_unique(__node_pointer __nd); + _LIBCUDACXX_INLINE_VISIBILITY + iterator __node_insert_multi(__node_pointer __nd); + _LIBCUDACXX_INLINE_VISIBILITY + iterator __node_insert_multi(const_iterator __p, + __node_pointer __nd); + +#ifndef _LIBCUDACXX_CXX03_LANG + template + _LIBCUDACXX_INLINE_VISIBILITY + pair __emplace_unique_key_args(_Key const& __k, _Args&&... __args); + + template + _LIBCUDACXX_INLINE_VISIBILITY + pair __emplace_unique_impl(_Args&&... __args); + + template + _LIBCUDACXX_INLINE_VISIBILITY + pair __emplace_unique(_Pp&& __x) { + return __emplace_unique_extract_key(_CUDA_VSTD::forward<_Pp>(__x), + __can_extract_key<_Pp, key_type>()); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename enable_if< + __can_extract_map_key<_First, key_type, __container_value_type>::value, + pair + >::type __emplace_unique(_First&& __f, _Second&& __s) { + return __emplace_unique_key_args(__f, _CUDA_VSTD::forward<_First>(__f), + _CUDA_VSTD::forward<_Second>(__s)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + pair __emplace_unique(_Args&&... __args) { + return __emplace_unique_impl(_CUDA_VSTD::forward<_Args>(__args)...); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + pair + __emplace_unique_extract_key(_Pp&& __x, __extract_key_fail_tag) { + return __emplace_unique_impl(_CUDA_VSTD::forward<_Pp>(__x)); + } + template + _LIBCUDACXX_INLINE_VISIBILITY + pair + __emplace_unique_extract_key(_Pp&& __x, __extract_key_self_tag) { + return __emplace_unique_key_args(__x, _CUDA_VSTD::forward<_Pp>(__x)); + } + template + _LIBCUDACXX_INLINE_VISIBILITY + pair + __emplace_unique_extract_key(_Pp&& __x, __extract_key_first_tag) { + return __emplace_unique_key_args(__x.first, _CUDA_VSTD::forward<_Pp>(__x)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __emplace_multi(_Args&&... __args); + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __emplace_hint_multi(const_iterator __p, _Args&&... 
__args); + + + _LIBCUDACXX_INLINE_VISIBILITY + pair + __insert_unique(__container_value_type&& __x) { + return __emplace_unique_key_args(_NodeTypes::__get_key(__x), _CUDA_VSTD::move(__x)); + } + + template ::value + >::type> + _LIBCUDACXX_INLINE_VISIBILITY + pair __insert_unique(_Pp&& __x) { + return __emplace_unique(_CUDA_VSTD::forward<_Pp>(__x)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_multi(_Pp&& __x) { + return __emplace_multi(_CUDA_VSTD::forward<_Pp>(__x)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_multi(const_iterator __p, _Pp&& __x) { + return __emplace_hint_multi(__p, _CUDA_VSTD::forward<_Pp>(__x)); + } + +#else // !defined(_LIBCUDACXX_CXX03_LANG) + template + _LIBCUDACXX_INLINE_VISIBILITY + pair __emplace_unique_key_args(_Key const&, _Args& __args); + + iterator __insert_multi(const __container_value_type& __x); + iterator __insert_multi(const_iterator __p, const __container_value_type& __x); +#endif + + _LIBCUDACXX_INLINE_VISIBILITY + pair __insert_unique(const __container_value_type& __x) { + return __emplace_unique_key_args(_NodeTypes::__get_key(__x), __x); + } + +#if _LIBCUDACXX_STD_VER > 14 + template + _LIBCUDACXX_INLINE_VISIBILITY + _InsertReturnType __node_handle_insert_unique(_NodeHandle&& __nh); + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __node_handle_insert_unique(const_iterator __hint, + _NodeHandle&& __nh); + template + _LIBCUDACXX_INLINE_VISIBILITY + void __node_handle_merge_unique(_Table& __source); + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __node_handle_insert_multi(_NodeHandle&& __nh); + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __node_handle_insert_multi(const_iterator __hint, _NodeHandle&& __nh); + template + _LIBCUDACXX_INLINE_VISIBILITY + void __node_handle_merge_multi(_Table& __source); + + template + _LIBCUDACXX_INLINE_VISIBILITY + _NodeHandle __node_handle_extract(key_type const& __key); + template + _LIBCUDACXX_INLINE_VISIBILITY + _NodeHandle __node_handle_extract(const_iterator __it); +#endif + + void clear() _NOEXCEPT; + void rehash(size_type __n); + _LIBCUDACXX_INLINE_VISIBILITY void reserve(size_type __n) + {rehash(static_cast(ceil(__n / max_load_factor())));} + + _LIBCUDACXX_INLINE_VISIBILITY + size_type bucket_count() const _NOEXCEPT + { + return __bucket_list_.get_deleter().size(); + } + + _LIBCUDACXX_INLINE_VISIBILITY + iterator begin() _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + iterator end() _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + const_iterator begin() const _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + const_iterator end() const _NOEXCEPT; + + template + _LIBCUDACXX_INLINE_VISIBILITY + size_type bucket(const _Key& __k) const + { + _LIBCUDACXX_ASSERT(bucket_count() > 0, + "unordered container::bucket(key) called when bucket_count() == 0"); + return __constrain_hash(hash_function()(__k), bucket_count()); + } + + template + iterator find(const _Key& __x); + template + const_iterator find(const _Key& __x) const; + + typedef __hash_node_destructor<__node_allocator> _Dp; + typedef unique_ptr<__node, _Dp> __node_holder; + + iterator erase(const_iterator __p); + iterator erase(const_iterator __first, const_iterator __last); + template + size_type __erase_unique(const _Key& __k); + template + size_type __erase_multi(const _Key& __k); + __node_holder remove(const_iterator __p) _NOEXCEPT; + + template + _LIBCUDACXX_INLINE_VISIBILITY + size_type __count_unique(const _Key& __k) const; + template + size_type __count_multi(const _Key& __k) const; + + template + 
pair + __equal_range_unique(const _Key& __k); + template + pair + __equal_range_unique(const _Key& __k) const; + + template + pair + __equal_range_multi(const _Key& __k); + template + pair + __equal_range_multi(const _Key& __k) const; + + void swap(__hash_table& __u) +#if _LIBCUDACXX_STD_VER <= 11 + _NOEXCEPT_( + __is_nothrow_swappable::value && __is_nothrow_swappable::value + && (!allocator_traits<__pointer_allocator>::propagate_on_container_swap::value + || __is_nothrow_swappable<__pointer_allocator>::value) + && (!__node_traits::propagate_on_container_swap::value + || __is_nothrow_swappable<__node_allocator>::value) + ); +#else + _NOEXCEPT_(__is_nothrow_swappable::value && __is_nothrow_swappable::value); +#endif + + _LIBCUDACXX_INLINE_VISIBILITY + size_type max_bucket_count() const _NOEXCEPT + {return max_size(); } + size_type bucket_size(size_type __n) const; + _LIBCUDACXX_INLINE_VISIBILITY float load_factor() const _NOEXCEPT + { + size_type __bc = bucket_count(); + return __bc != 0 ? (float)size() / __bc : 0.f; + } + _LIBCUDACXX_INLINE_VISIBILITY void max_load_factor(float __mlf) _NOEXCEPT + { + _LIBCUDACXX_ASSERT(__mlf > 0, + "unordered container::max_load_factor(lf) called with lf <= 0"); + max_load_factor() = _CUDA_VSTD::max(__mlf, load_factor()); + } + + _LIBCUDACXX_INLINE_VISIBILITY + local_iterator + begin(size_type __n) + { + _LIBCUDACXX_ASSERT(__n < bucket_count(), + "unordered container::begin(n) called with n >= bucket_count()"); +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + return local_iterator(__bucket_list_[__n], __n, bucket_count(), this); +#else + return local_iterator(__bucket_list_[__n], __n, bucket_count()); +#endif + } + + _LIBCUDACXX_INLINE_VISIBILITY + local_iterator + end(size_type __n) + { + _LIBCUDACXX_ASSERT(__n < bucket_count(), + "unordered container::end(n) called with n >= bucket_count()"); +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + return local_iterator(nullptr, __n, bucket_count(), this); +#else + return local_iterator(nullptr, __n, bucket_count()); +#endif + } + + _LIBCUDACXX_INLINE_VISIBILITY + const_local_iterator + cbegin(size_type __n) const + { + _LIBCUDACXX_ASSERT(__n < bucket_count(), + "unordered container::cbegin(n) called with n >= bucket_count()"); +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + return const_local_iterator(__bucket_list_[__n], __n, bucket_count(), this); +#else + return const_local_iterator(__bucket_list_[__n], __n, bucket_count()); +#endif + } + + _LIBCUDACXX_INLINE_VISIBILITY + const_local_iterator + cend(size_type __n) const + { + _LIBCUDACXX_ASSERT(__n < bucket_count(), + "unordered container::cend(n) called with n >= bucket_count()"); +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + return const_local_iterator(nullptr, __n, bucket_count(), this); +#else + return const_local_iterator(nullptr, __n, bucket_count()); +#endif + } + +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + + bool __dereferenceable(const const_iterator* __i) const; + bool __decrementable(const const_iterator* __i) const; + bool __addable(const const_iterator* __i, ptrdiff_t __n) const; + bool __subscriptable(const const_iterator* __i, ptrdiff_t __n) const; + +#endif // _LIBCUDACXX_DEBUG_LEVEL >= 2 + +private: + void __rehash(size_type __n); + +#ifndef _LIBCUDACXX_CXX03_LANG + template + __node_holder __construct_node(_Args&& ...__args); + + template + __node_holder __construct_node_hash(size_t __hash, _First&& __f, _Rest&&... 
__rest); +#else // _LIBCUDACXX_CXX03_LANG + __node_holder __construct_node(const __container_value_type& __v); + __node_holder __construct_node_hash(size_t __hash, const __container_value_type& __v); +#endif + + + _LIBCUDACXX_INLINE_VISIBILITY + void __copy_assign_alloc(const __hash_table& __u) + {__copy_assign_alloc(__u, integral_constant());} + void __copy_assign_alloc(const __hash_table& __u, true_type); + _LIBCUDACXX_INLINE_VISIBILITY + void __copy_assign_alloc(const __hash_table&, false_type) {} + +#ifndef _LIBCUDACXX_CXX03_LANG + void __move_assign(__hash_table& __u, false_type); + void __move_assign(__hash_table& __u, true_type) + _NOEXCEPT_( + is_nothrow_move_assignable<__node_allocator>::value && + is_nothrow_move_assignable::value && + is_nothrow_move_assignable::value); + _LIBCUDACXX_INLINE_VISIBILITY + void __move_assign_alloc(__hash_table& __u) + _NOEXCEPT_( + !__node_traits::propagate_on_container_move_assignment::value || + (is_nothrow_move_assignable<__pointer_allocator>::value && + is_nothrow_move_assignable<__node_allocator>::value)) + {__move_assign_alloc(__u, integral_constant());} + _LIBCUDACXX_INLINE_VISIBILITY + void __move_assign_alloc(__hash_table& __u, true_type) + _NOEXCEPT_( + is_nothrow_move_assignable<__pointer_allocator>::value && + is_nothrow_move_assignable<__node_allocator>::value) + { + __bucket_list_.get_deleter().__alloc() = + _CUDA_VSTD::move(__u.__bucket_list_.get_deleter().__alloc()); + __node_alloc() = _CUDA_VSTD::move(__u.__node_alloc()); + } + _LIBCUDACXX_INLINE_VISIBILITY + void __move_assign_alloc(__hash_table&, false_type) _NOEXCEPT {} +#endif // _LIBCUDACXX_CXX03_LANG + + void __deallocate_node(__next_pointer __np) _NOEXCEPT; + __next_pointer __detach() _NOEXCEPT; + + template friend class _LIBCUDACXX_TEMPLATE_VIS unordered_map; + template friend class _LIBCUDACXX_TEMPLATE_VIS unordered_multimap; +}; + +template +inline +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table() + _NOEXCEPT_( + is_nothrow_default_constructible<__bucket_list>::value && + is_nothrow_default_constructible<__first_node>::value && + is_nothrow_default_constructible<__node_allocator>::value && + is_nothrow_default_constructible::value && + is_nothrow_default_constructible::value) + : __p2_(0), + __p3_(1.0f) +{ +} + +template +inline +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const hasher& __hf, + const key_equal& __eql) + : __bucket_list_(nullptr, __bucket_list_deleter()), + __p1_(), + __p2_(0, __hf), + __p3_(1.0f, __eql) +{ +} + +template +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const hasher& __hf, + const key_equal& __eql, + const allocator_type& __a) + : __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)), + __p1_(__second_tag(), __node_allocator(__a)), + __p2_(0, __hf), + __p3_(1.0f, __eql) +{ +} + +template +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const allocator_type& __a) + : __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)), + __p1_(__second_tag(), __node_allocator(__a)), + __p2_(0), + __p3_(1.0f) +{ +} + +template +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const __hash_table& __u) + : __bucket_list_(nullptr, + __bucket_list_deleter(allocator_traits<__pointer_allocator>:: + select_on_container_copy_construction( + __u.__bucket_list_.get_deleter().__alloc()), 0)), + __p1_(__second_tag(), allocator_traits<__node_allocator>:: + select_on_container_copy_construction(__u.__node_alloc())), + __p2_(0, __u.hash_function()), + __p3_(__u.__p3_) +{ +} + +template 
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(const __hash_table& __u, + const allocator_type& __a) + : __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)), + __p1_(__second_tag(), __node_allocator(__a)), + __p2_(0, __u.hash_function()), + __p3_(__u.__p3_) +{ +} + +#ifndef _LIBCUDACXX_CXX03_LANG + +template +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(__hash_table&& __u) + _NOEXCEPT_( + is_nothrow_move_constructible<__bucket_list>::value && + is_nothrow_move_constructible<__first_node>::value && + is_nothrow_move_constructible<__node_allocator>::value && + is_nothrow_move_constructible::value && + is_nothrow_move_constructible::value) + : __bucket_list_(_CUDA_VSTD::move(__u.__bucket_list_)), + __p1_(_CUDA_VSTD::move(__u.__p1_)), + __p2_(_CUDA_VSTD::move(__u.__p2_)), + __p3_(_CUDA_VSTD::move(__u.__p3_)) +{ + if (size() > 0) + { + __bucket_list_[__constrain_hash(__p1_.first().__next_->__hash(), bucket_count())] = + __p1_.first().__ptr(); + __u.__p1_.first().__next_ = nullptr; + __u.size() = 0; + } +} + +template +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__hash_table(__hash_table&& __u, + const allocator_type& __a) + : __bucket_list_(nullptr, __bucket_list_deleter(__pointer_allocator(__a), 0)), + __p1_(__second_tag(), __node_allocator(__a)), + __p2_(0, _CUDA_VSTD::move(__u.hash_function())), + __p3_(_CUDA_VSTD::move(__u.__p3_)) +{ + if (__a == allocator_type(__u.__node_alloc())) + { + __bucket_list_.reset(__u.__bucket_list_.release()); + __bucket_list_.get_deleter().size() = __u.__bucket_list_.get_deleter().size(); + __u.__bucket_list_.get_deleter().size() = 0; + if (__u.size() > 0) + { + __p1_.first().__next_ = __u.__p1_.first().__next_; + __u.__p1_.first().__next_ = nullptr; + __bucket_list_[__constrain_hash(__p1_.first().__next_->__hash(), bucket_count())] = + __p1_.first().__ptr(); + size() = __u.size(); + __u.size() = 0; + } + } +} + +#endif // _LIBCUDACXX_CXX03_LANG + +template +__hash_table<_Tp, _Hash, _Equal, _Alloc>::~__hash_table() +{ +#if defined(_LIBCUDACXX_CXX03_LANG) + static_assert((is_copy_constructible::value), + "Predicate must be copy-constructible."); + static_assert((is_copy_constructible::value), + "Hasher must be copy-constructible."); +#endif + + __deallocate_node(__p1_.first().__next_); +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + __get_db()->__erase_c(this); +#endif +} + +template +void +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__copy_assign_alloc( + const __hash_table& __u, true_type) +{ + if (__node_alloc() != __u.__node_alloc()) + { + clear(); + __bucket_list_.reset(); + __bucket_list_.get_deleter().size() = 0; + } + __bucket_list_.get_deleter().__alloc() = __u.__bucket_list_.get_deleter().__alloc(); + __node_alloc() = __u.__node_alloc(); +} + +template +__hash_table<_Tp, _Hash, _Equal, _Alloc>& +__hash_table<_Tp, _Hash, _Equal, _Alloc>::operator=(const __hash_table& __u) +{ + if (this != &__u) + { + __copy_assign_alloc(__u); + hash_function() = __u.hash_function(); + key_eq() = __u.key_eq(); + max_load_factor() = __u.max_load_factor(); + __assign_multi(__u.begin(), __u.end()); + } + return *this; +} + +template +void +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__deallocate_node(__next_pointer __np) + _NOEXCEPT +{ + __node_allocator& __na = __node_alloc(); + while (__np != nullptr) + { + __next_pointer __next = __np->__next_; +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + __c_node* __c = __get_db()->__find_c_and_lock(this); + for (__i_node** __p = __c->end_; __p != __c->beg_; ) + { + --__p; + iterator* __i = static_cast((*__p)->__i_); + 
if (__i->__node_ == __np) + { + (*__p)->__c_ = nullptr; + if (--__c->end_ != __p) + memmove(__p, __p+1, (__c->end_ - __p)*sizeof(__i_node*)); + } + } + __get_db()->unlock(); +#endif + __node_pointer __real_np = __np->__upcast(); + __node_traits::destroy(__na, _NodeTypes::__get_ptr(__real_np->__value_)); + __node_traits::deallocate(__na, __real_np, 1); + __np = __next; + } +} + +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__next_pointer +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__detach() _NOEXCEPT +{ + size_type __bc = bucket_count(); + for (size_type __i = 0; __i < __bc; ++__i) + __bucket_list_[__i] = nullptr; + size() = 0; + __next_pointer __cache = __p1_.first().__next_; + __p1_.first().__next_ = nullptr; + return __cache; +} + +#ifndef _LIBCUDACXX_CXX03_LANG + +template +void +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__move_assign( + __hash_table& __u, true_type) + _NOEXCEPT_( + is_nothrow_move_assignable<__node_allocator>::value && + is_nothrow_move_assignable::value && + is_nothrow_move_assignable::value) +{ + clear(); + __bucket_list_.reset(__u.__bucket_list_.release()); + __bucket_list_.get_deleter().size() = __u.__bucket_list_.get_deleter().size(); + __u.__bucket_list_.get_deleter().size() = 0; + __move_assign_alloc(__u); + size() = __u.size(); + hash_function() = _CUDA_VSTD::move(__u.hash_function()); + max_load_factor() = __u.max_load_factor(); + key_eq() = _CUDA_VSTD::move(__u.key_eq()); + __p1_.first().__next_ = __u.__p1_.first().__next_; + if (size() > 0) + { + __bucket_list_[__constrain_hash(__p1_.first().__next_->__hash(), bucket_count())] = + __p1_.first().__ptr(); + __u.__p1_.first().__next_ = nullptr; + __u.size() = 0; + } +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + __get_db()->swap(this, &__u); +#endif +} + +template +void +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__move_assign( + __hash_table& __u, false_type) +{ + if (__node_alloc() == __u.__node_alloc()) + __move_assign(__u, true_type()); + else + { + hash_function() = _CUDA_VSTD::move(__u.hash_function()); + key_eq() = _CUDA_VSTD::move(__u.key_eq()); + max_load_factor() = __u.max_load_factor(); + if (bucket_count() != 0) + { + __next_pointer __cache = __detach(); +#ifndef _LIBCUDACXX_NO_EXCEPTIONS + try + { +#endif // _LIBCUDACXX_NO_EXCEPTIONS + const_iterator __i = __u.begin(); + while (__cache != nullptr && __u.size() != 0) + { + __cache->__upcast()->__value_ = + _CUDA_VSTD::move(__u.remove(__i++)->__value_); + __next_pointer __next = __cache->__next_; + __node_insert_multi(__cache->__upcast()); + __cache = __next; + } +#ifndef _LIBCUDACXX_NO_EXCEPTIONS + } + catch (...) 
+            {
+                __deallocate_node(__cache);
+                throw;
+            }
+#endif // _LIBCUDACXX_NO_EXCEPTIONS
+            __deallocate_node(__cache);
+        }
+        const_iterator __i = __u.begin();
+        while (__u.size() != 0)
+        {
+            __node_holder __h =
+                __construct_node(_NodeTypes::__move(__u.remove(__i++)->__value_));
+            __node_insert_multi(__h.get());
+            __h.release();
+        }
+    }
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+inline
+__hash_table<_Tp, _Hash, _Equal, _Alloc>&
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::operator=(__hash_table&& __u)
+    _NOEXCEPT_(
+        __node_traits::propagate_on_container_move_assignment::value &&
+        is_nothrow_move_assignable<__node_allocator>::value &&
+        is_nothrow_move_assignable<hasher>::value &&
+        is_nothrow_move_assignable<key_equal>::value)
+{
+    __move_assign(__u, integral_constant<bool,
+        __node_traits::propagate_on_container_move_assignment::value>());
+    return *this;
+}
+
+#endif // _LIBCUDACXX_CXX03_LANG
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+template <class _InputIterator>
+void
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__assign_unique(_InputIterator __first,
+                                                          _InputIterator __last)
+{
+    typedef iterator_traits<_InputIterator> _ITraits;
+    typedef typename _ITraits::value_type _ItValueType;
+    static_assert((is_same<_ItValueType, __container_value_type>::value),
+                  "__assign_unique may only be called with the container's value type");
+
+    if (bucket_count() != 0)
+    {
+        __next_pointer __cache = __detach();
+#ifndef _LIBCUDACXX_NO_EXCEPTIONS
+        try
+        {
+#endif // _LIBCUDACXX_NO_EXCEPTIONS
+            for (; __cache != nullptr && __first != __last; ++__first)
+            {
+                __cache->__upcast()->__value_ = *__first;
+                __next_pointer __next = __cache->__next_;
+                __node_insert_unique(__cache->__upcast());
+                __cache = __next;
+            }
+#ifndef _LIBCUDACXX_NO_EXCEPTIONS
+        }
+        catch (...)
+        {
+            __deallocate_node(__cache);
+            throw;
+        }
+#endif // _LIBCUDACXX_NO_EXCEPTIONS
+        __deallocate_node(__cache);
+    }
+    for (; __first != __last; ++__first)
+        __insert_unique(*__first);
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+template <class _InputIterator>
+void
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__assign_multi(_InputIterator __first,
+                                                         _InputIterator __last)
+{
+    typedef iterator_traits<_InputIterator> _ITraits;
+    typedef typename _ITraits::value_type _ItValueType;
+    static_assert((is_same<_ItValueType, __container_value_type>::value ||
+                   is_same<_ItValueType, __node_value_type>::value),
+                  "__assign_multi may only be called with the container's value type"
+                  " or the node's value type");
+    if (bucket_count() != 0)
+    {
+        __next_pointer __cache = __detach();
+#ifndef _LIBCUDACXX_NO_EXCEPTIONS
+        try
+        {
+#endif // _LIBCUDACXX_NO_EXCEPTIONS
+            for (; __cache != nullptr && __first != __last; ++__first)
+            {
+                __cache->__upcast()->__value_ = *__first;
+                __next_pointer __next = __cache->__next_;
+                __node_insert_multi(__cache->__upcast());
+                __cache = __next;
+            }
+#ifndef _LIBCUDACXX_NO_EXCEPTIONS
+        }
+        catch (...)
+        {
+            __deallocate_node(__cache);
+            throw;
+        }
+#endif // _LIBCUDACXX_NO_EXCEPTIONS
+        __deallocate_node(__cache);
+    }
+    for (; __first != __last; ++__first)
+        __insert_multi(_NodeTypes::__get_value(*__first));
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+inline
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::begin() _NOEXCEPT
+{
+#if _LIBCUDACXX_DEBUG_LEVEL >= 2
+    return iterator(__p1_.first().__next_, this);
+#else
+    return iterator(__p1_.first().__next_);
+#endif
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+inline
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::end() _NOEXCEPT
+{
+#if _LIBCUDACXX_DEBUG_LEVEL >= 2
+    return iterator(nullptr, this);
+#else
+    return iterator(nullptr);
+#endif
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+inline
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::begin() const _NOEXCEPT
+{
+#if _LIBCUDACXX_DEBUG_LEVEL >= 2
+    return const_iterator(__p1_.first().__next_, this);
+#else
+    return const_iterator(__p1_.first().__next_);
+#endif
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+inline
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::end() const _NOEXCEPT
+{
+#if _LIBCUDACXX_DEBUG_LEVEL >= 2
+    return const_iterator(nullptr, this);
+#else
+    return const_iterator(nullptr);
+#endif
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+void
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::clear() _NOEXCEPT
+{
+    if (size() > 0)
+    {
+        __deallocate_node(__p1_.first().__next_);
+        __p1_.first().__next_ = nullptr;
+        size_type __bc = bucket_count();
+        for (size_type __i = 0; __i < __bc; ++__i)
+            __bucket_list_[__i] = nullptr;
+        size() = 0;
+    }
+}
+
+
+// Prepare the container for an insertion of the value __value with the hash
+// __hash. This does a lookup into the container to see if __value is already
+// present, and performs a rehash if necessary. Returns a pointer to the
+// existing element if it exists, otherwise nullptr.
+//
+// Note that this function does forward exceptions if key_eq() throws, and never
+// mutates __value or actually inserts into the map.
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+_LIBCUDACXX_INLINE_VISIBILITY
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__next_pointer
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_unique_prepare(
+    size_t __hash, value_type& __value)
+{
+    size_type __bc = bucket_count();
+
+    if (__bc != 0)
+    {
+        size_t __chash = __constrain_hash(__hash, __bc);
+        __next_pointer __ndptr = __bucket_list_[__chash];
+        if (__ndptr != nullptr)
+        {
+            for (__ndptr = __ndptr->__next_; __ndptr != nullptr &&
+                     __constrain_hash(__ndptr->__hash(), __bc) == __chash;
+                 __ndptr = __ndptr->__next_)
+            {
+                if (key_eq()(__ndptr->__upcast()->__value_, __value))
+                    return __ndptr;
+            }
+        }
+    }
+    if (size()+1 > __bc * max_load_factor() || __bc == 0)
+    {
+        rehash(_CUDA_VSTD::max<size_type>(2 * __bc + !__is_hash_power2(__bc),
+                    size_type(ceil(float(size() + 1) / max_load_factor()))));
+    }
+    return nullptr;
+}
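+
+// A minimal standalone sketch of the growth policy used by the rehash call in
+// __node_insert_unique_prepare above, kept out of the build with #if 0. This
+// is an illustration only, not part of the vendored header, and the helper
+// name next_bucket_count is hypothetical.
+#if 0
+#include <cmath>
+#include <cstddef>
+
+// Grow to roughly double the current bucket count (the extra +1 when the
+// count is not a power of two feeds rehash(), which rounds such requests up
+// to the next prime), but never below the count the max load factor requires
+// to hold size() + 1 elements.
+inline std::size_t next_bucket_count(std::size_t bc, std::size_t sz,
+                                     float max_load_factor, bool bc_is_pow2)
+{
+    std::size_t doubled = 2 * bc + (bc_is_pow2 ? 0 : 1);
+    std::size_t needed =
+        static_cast<std::size_t>(std::ceil(float(sz + 1) / max_load_factor));
+    return doubled > needed ? doubled : needed;
+}
+#endif
+
+// Insert the node __nd into the container by pushing it into the right bucket,
+// and updating size(). Assumes that __nd->__hash is up-to-date, and that
+// rehashing has already occurred and that no element with the same key exists
+// in the map.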
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+_LIBCUDACXX_INLINE_VISIBILITY
+void
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_unique_perform(
+    __node_pointer __nd) _NOEXCEPT
+{
+    size_type __bc = bucket_count();
+    size_t __chash = __constrain_hash(__nd->__hash(), __bc);
+    // insert_after __bucket_list_[__chash], or __first_node if bucket is null
+    __next_pointer __pn = __bucket_list_[__chash];
+    if (__pn == nullptr)
+    {
+        __pn = __p1_.first().__ptr();
+        __nd->__next_ = __pn->__next_;
+        __pn->__next_ = __nd->__ptr();
+        // fix up __bucket_list_
+        __bucket_list_[__chash] = __pn;
+        if (__nd->__next_ != nullptr)
+            __bucket_list_[__constrain_hash(__nd->__next_->__hash(), __bc)] = __nd->__ptr();
+    }
+    else
+    {
+        __nd->__next_ = __pn->__next_;
+        __pn->__next_ = __nd->__ptr();
+    }
+    ++size();
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+pair<typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator, bool>
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_unique(__node_pointer __nd)
+{
+    __nd->__hash_ = hash_function()(__nd->__value_);
+    __next_pointer __existing_node =
+        __node_insert_unique_prepare(__nd->__hash(), __nd->__value_);
+
+    // Insert the node, unless it already exists in the container.
+    bool __inserted = false;
+    if (__existing_node == nullptr)
+    {
+        __node_insert_unique_perform(__nd);
+        __existing_node = __nd->__ptr();
+        __inserted = true;
+    }
+#if _LIBCUDACXX_DEBUG_LEVEL >= 2
+    return pair<iterator, bool>(iterator(__existing_node, this), __inserted);
+#else
+    return pair<iterator, bool>(iterator(__existing_node), __inserted);
+#endif
+}
+
+// Prepare the container for an insertion of the value __cp_val with the hash
+// __cp_hash. This does a lookup into the container to see if __cp_val is
+// already present, and performs a rehash if necessary. Returns a pointer to the
+// last occurrence of __cp_val in the map.
+//
+// Note that this function does forward exceptions if key_eq() throws, and never
+// mutates __cp_val or actually inserts into the map.
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__next_pointer
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_multi_prepare(
+    size_t __cp_hash, value_type& __cp_val)
+{
+    size_type __bc = bucket_count();
+    if (size()+1 > __bc * max_load_factor() || __bc == 0)
+    {
+        rehash(_CUDA_VSTD::max<size_type>(2 * __bc + !__is_hash_power2(__bc),
+                    size_type(ceil(float(size() + 1) / max_load_factor()))));
+        __bc = bucket_count();
+    }
+    size_t __chash = __constrain_hash(__cp_hash, __bc);
+    __next_pointer __pn = __bucket_list_[__chash];
+    if (__pn != nullptr)
+    {
+        for (bool __found = false; __pn->__next_ != nullptr &&
+                 __constrain_hash(__pn->__next_->__hash(), __bc) == __chash;
+             __pn = __pn->__next_)
+        {
+            //      __found    key_eq()   action
+            //      false      false      loop
+            //      true       true       loop
+            //      false      true       set __found to true
+            //      true       false      break
+            if (__found != (__pn->__next_->__hash() == __cp_hash &&
+                            key_eq()(__pn->__next_->__upcast()->__value_, __cp_val)))
+            {
+                if (!__found)
+                    __found = true;
+                else
+                    break;
+            }
+        }
+    }
+    return __pn;
+}
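+
+// The __found flip-flop in __node_insert_multi_prepare above walks a bucket's
+// chain and returns the node after which a duplicate should be spliced: the
+// last node of the equal run when one exists, otherwise the bucket's last
+// node. A hedged sketch of the same scan over a plain singly linked list
+// (ignoring the bucket bound), kept out of the build with #if 0; Node,
+// last_equal, and Eq are hypothetical stand-ins, not names from this header.
+#if 0
+template <class T>
+struct Node { Node* next; T value; };
+
+template <class T, class Eq>
+Node<T>* last_equal(Node<T>* prev, const T& v, Eq eq)
+{
+    // prev->next is the first candidate; stop once the equal run has been
+    // entered and then exited again.
+    for (bool found = false; prev->next != nullptr; prev = prev->next)
+    {
+        bool is_eq = eq(prev->next->value, v);
+        if (found && !is_eq)
+            break;            // walked past the end of the equal run
+        if (!found && is_eq)
+            found = true;     // entered the equal run
+    }
+    return prev;              // link the duplicate after this node
+}
+#endif
+
+// Insert the node __cp into the container after __pn (which is the last node in
+// the bucket that compares equal to __cp). Rehashing, and checking for
+// uniqueness has already been performed (in __node_insert_multi_prepare), so
+// all we need to do is update the bucket and size(). Assumes that __cp->__hash
+// is up-to-date.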
+template +void +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_multi_perform( + __node_pointer __cp, __next_pointer __pn) _NOEXCEPT +{ + size_type __bc = bucket_count(); + size_t __chash = __constrain_hash(__cp->__hash_, __bc); + if (__pn == nullptr) + { + __pn =__p1_.first().__ptr(); + __cp->__next_ = __pn->__next_; + __pn->__next_ = __cp->__ptr(); + // fix up __bucket_list_ + __bucket_list_[__chash] = __pn; + if (__cp->__next_ != nullptr) + __bucket_list_[__constrain_hash(__cp->__next_->__hash(), __bc)] + = __cp->__ptr(); + } + else + { + __cp->__next_ = __pn->__next_; + __pn->__next_ = __cp->__ptr(); + if (__cp->__next_ != nullptr) + { + size_t __nhash = __constrain_hash(__cp->__next_->__hash(), __bc); + if (__nhash != __chash) + __bucket_list_[__nhash] = __cp->__ptr(); + } + } + ++size(); +} + + +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_multi(__node_pointer __cp) +{ + __cp->__hash_ = hash_function()(__cp->__value_); + __next_pointer __pn = __node_insert_multi_prepare(__cp->__hash(), __cp->__value_); + __node_insert_multi_perform(__cp, __pn); + +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + return iterator(__cp->__ptr(), this); +#else + return iterator(__cp->__ptr()); +#endif +} + +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_insert_multi( + const_iterator __p, __node_pointer __cp) +{ +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this, + "unordered container::emplace_hint(const_iterator, args...) called with an iterator not" + " referring to this unordered container"); +#endif + if (__p != end() && key_eq()(*__p, __cp->__value_)) + { + __next_pointer __np = __p.__node_; + __cp->__hash_ = __np->__hash(); + size_type __bc = bucket_count(); + if (size()+1 > __bc * max_load_factor() || __bc == 0) + { + rehash(_CUDA_VSTD::max(2 * __bc + !__is_hash_power2(__bc), + size_type(ceil(float(size() + 1) / max_load_factor())))); + __bc = bucket_count(); + } + size_t __chash = __constrain_hash(__cp->__hash_, __bc); + __next_pointer __pp = __bucket_list_[__chash]; + while (__pp->__next_ != __np) + __pp = __pp->__next_; + __cp->__next_ = __np; + __pp->__next_ = static_cast<__next_pointer>(__cp); + ++size(); +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + return iterator(static_cast<__next_pointer>(__cp), this); +#else + return iterator(static_cast<__next_pointer>(__cp)); +#endif + } + return __node_insert_multi(__cp); +} + + + +#ifndef _LIBCUDACXX_CXX03_LANG +template +template +pair::iterator, bool> +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__emplace_unique_key_args(_Key const& __k, _Args&&... 
__args) +#else +template +template +pair::iterator, bool> +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__emplace_unique_key_args(_Key const& __k, _Args& __args) +#endif +{ + + size_t __hash = hash_function()(__k); + size_type __bc = bucket_count(); + bool __inserted = false; + __next_pointer __nd; + size_t __chash; + if (__bc != 0) + { + __chash = __constrain_hash(__hash, __bc); + __nd = __bucket_list_[__chash]; + if (__nd != nullptr) + { + for (__nd = __nd->__next_; __nd != nullptr && + (__nd->__hash() == __hash || __constrain_hash(__nd->__hash(), __bc) == __chash); + __nd = __nd->__next_) + { + if (key_eq()(__nd->__upcast()->__value_, __k)) + goto __done; + } + } + } + { +#ifndef _LIBCUDACXX_CXX03_LANG + __node_holder __h = __construct_node_hash(__hash, _CUDA_VSTD::forward<_Args>(__args)...); +#else + __node_holder __h = __construct_node_hash(__hash, __args); +#endif + if (size()+1 > __bc * max_load_factor() || __bc == 0) + { + rehash(_CUDA_VSTD::max(2 * __bc + !__is_hash_power2(__bc), + size_type(ceil(float(size() + 1) / max_load_factor())))); + __bc = bucket_count(); + __chash = __constrain_hash(__hash, __bc); + } + // insert_after __bucket_list_[__chash], or __first_node if bucket is null + __next_pointer __pn = __bucket_list_[__chash]; + if (__pn == nullptr) + { + __pn = __p1_.first().__ptr(); + __h->__next_ = __pn->__next_; + __pn->__next_ = __h.get()->__ptr(); + // fix up __bucket_list_ + __bucket_list_[__chash] = __pn; + if (__h->__next_ != nullptr) + __bucket_list_[__constrain_hash(__h->__next_->__hash(), __bc)] + = __h.get()->__ptr(); + } + else + { + __h->__next_ = __pn->__next_; + __pn->__next_ = static_cast<__next_pointer>(__h.get()); + } + __nd = static_cast<__next_pointer>(__h.release()); + // increment size + ++size(); + __inserted = true; + } +__done: +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + return pair(iterator(__nd, this), __inserted); +#else + return pair(iterator(__nd), __inserted); +#endif +} + +#ifndef _LIBCUDACXX_CXX03_LANG + +template +template +pair::iterator, bool> +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__emplace_unique_impl(_Args&&... __args) +{ + __node_holder __h = __construct_node(_CUDA_VSTD::forward<_Args>(__args)...); + pair __r = __node_insert_unique(__h.get()); + if (__r.second) + __h.release(); + return __r; +} + +template +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__emplace_multi(_Args&&... __args) +{ + __node_holder __h = __construct_node(_CUDA_VSTD::forward<_Args>(__args)...); + iterator __r = __node_insert_multi(__h.get()); + __h.release(); + return __r; +} + +template +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__emplace_hint_multi( + const_iterator __p, _Args&&... __args) +{ +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this, + "unordered container::emplace_hint(const_iterator, args...) 
called with an iterator not" + " referring to this unordered container"); +#endif + __node_holder __h = __construct_node(_CUDA_VSTD::forward<_Args>(__args)...); + iterator __r = __node_insert_multi(__p, __h.get()); + __h.release(); + return __r; +} + +#else // _LIBCUDACXX_CXX03_LANG + +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__insert_multi(const __container_value_type& __x) +{ + __node_holder __h = __construct_node(__x); + iterator __r = __node_insert_multi(__h.get()); + __h.release(); + return __r; +} + +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__insert_multi(const_iterator __p, + const __container_value_type& __x) +{ +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + _LIBCUDACXX_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this, + "unordered container::insert(const_iterator, lvalue) called with an iterator not" + " referring to this unordered container"); +#endif + __node_holder __h = __construct_node(__x); + iterator __r = __node_insert_multi(__p, __h.get()); + __h.release(); + return __r; +} + +#endif // _LIBCUDACXX_CXX03_LANG + +#if _LIBCUDACXX_STD_VER > 14 +template +template +_LIBCUDACXX_INLINE_VISIBILITY +_InsertReturnType +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_handle_insert_unique( + _NodeHandle&& __nh) +{ + if (__nh.empty()) + return _InsertReturnType{end(), false, _NodeHandle()}; + pair __result = __node_insert_unique(__nh.__ptr_); + if (__result.second) + __nh.__release_ptr(); + return _InsertReturnType{__result.first, __result.second, _CUDA_VSTD::move(__nh)}; +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_handle_insert_unique( + const_iterator, _NodeHandle&& __nh) +{ + if (__nh.empty()) + return end(); + pair __result = __node_insert_unique(__nh.__ptr_); + if (__result.second) + __nh.__release_ptr(); + return __result.first; +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +_NodeHandle +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_handle_extract( + key_type const& __key) +{ + iterator __i = find(__key); + if (__i == end()) + return _NodeHandle(); + return __node_handle_extract<_NodeHandle>(__i); +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +_NodeHandle +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_handle_extract( + const_iterator __p) +{ + allocator_type __alloc(__node_alloc()); + return _NodeHandle(remove(__p).release(), __alloc); +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +void +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_handle_merge_unique( + _Table& __source) +{ + static_assert(is_same<__node, typename _Table::__node>::value, ""); + + for (typename _Table::iterator __it = __source.begin(); + __it != __source.end();) + { + __node_pointer __src_ptr = __it.__node_->__upcast(); + size_t __hash = hash_function()(__src_ptr->__value_); + __next_pointer __existing_node = + __node_insert_unique_prepare(__hash, __src_ptr->__value_); + auto __prev_iter = __it++; + if (__existing_node == nullptr) + { + (void)__source.remove(__prev_iter).release(); + __src_ptr->__hash_ = __hash; + __node_insert_unique_perform(__src_ptr); + } + } +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_handle_insert_multi( + _NodeHandle&& __nh) +{ + if (__nh.empty()) + return end(); + 
iterator __result = __node_insert_multi(__nh.__ptr_); + __nh.__release_ptr(); + return __result; +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_handle_insert_multi( + const_iterator __hint, _NodeHandle&& __nh) +{ + if (__nh.empty()) + return end(); + iterator __result = __node_insert_multi(__hint, __nh.__ptr_); + __nh.__release_ptr(); + return __result; +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +void +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_handle_merge_multi( + _Table& __source) +{ + static_assert(is_same::value, ""); + + for (typename _Table::iterator __it = __source.begin(); + __it != __source.end();) + { + __node_pointer __src_ptr = __it.__node_->__upcast(); + size_t __src_hash = hash_function()(__src_ptr->__value_); + __next_pointer __pn = + __node_insert_multi_prepare(__src_hash, __src_ptr->__value_); + (void)__source.remove(__it++).release(); + __src_ptr->__hash_ = __src_hash; + __node_insert_multi_perform(__src_ptr, __pn); + } +} +#endif // _LIBCUDACXX_STD_VER > 14 + +template +void +__hash_table<_Tp, _Hash, _Equal, _Alloc>::rehash(size_type __n) +_LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK +{ + if (__n == 1) + __n = 2; + else if (__n & (__n - 1)) + __n = __next_prime(__n); + size_type __bc = bucket_count(); + if (__n > __bc) + __rehash(__n); + else if (__n < __bc) + { + __n = _CUDA_VSTD::max + ( + __n, + __is_hash_power2(__bc) ? __next_hash_pow2(size_t(ceil(float(size()) / max_load_factor()))) : + __next_prime(size_t(ceil(float(size()) / max_load_factor()))) + ); + if (__n < __bc) + __rehash(__n); + } +} + +template +void +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__rehash(size_type __nbc) +{ +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + __get_db()->__invalidate_all(this); +#endif // _LIBCUDACXX_DEBUG_LEVEL >= 2 + __pointer_allocator& __npa = __bucket_list_.get_deleter().__alloc(); + __bucket_list_.reset(__nbc > 0 ? 
+ __pointer_alloc_traits::allocate(__npa, __nbc) : nullptr); + __bucket_list_.get_deleter().size() = __nbc; + if (__nbc > 0) + { + for (size_type __i = 0; __i < __nbc; ++__i) + __bucket_list_[__i] = nullptr; + __next_pointer __pp = __p1_.first().__ptr(); + __next_pointer __cp = __pp->__next_; + if (__cp != nullptr) + { + size_type __chash = __constrain_hash(__cp->__hash(), __nbc); + __bucket_list_[__chash] = __pp; + size_type __phash = __chash; + for (__pp = __cp, __cp = __cp->__next_; __cp != nullptr; + __cp = __pp->__next_) + { + __chash = __constrain_hash(__cp->__hash(), __nbc); + if (__chash == __phash) + __pp = __cp; + else + { + if (__bucket_list_[__chash] == nullptr) + { + __bucket_list_[__chash] = __pp; + __pp = __cp; + __phash = __chash; + } + else + { + __next_pointer __np = __cp; + for (; __np->__next_ != nullptr && + key_eq()(__cp->__upcast()->__value_, + __np->__next_->__upcast()->__value_); + __np = __np->__next_) + ; + __pp->__next_ = __np->__next_; + __np->__next_ = __bucket_list_[__chash]->__next_; + __bucket_list_[__chash]->__next_ = __cp; + + } + } + } + } + } +} + +template +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::find(const _Key& __k) +{ + size_t __hash = hash_function()(__k); + size_type __bc = bucket_count(); + if (__bc != 0) + { + size_t __chash = __constrain_hash(__hash, __bc); + __next_pointer __nd = __bucket_list_[__chash]; + if (__nd != nullptr) + { + for (__nd = __nd->__next_; __nd != nullptr && + (__nd->__hash() == __hash + || __constrain_hash(__nd->__hash(), __bc) == __chash); + __nd = __nd->__next_) + { + if ((__nd->__hash() == __hash) + && key_eq()(__nd->__upcast()->__value_, __k)) +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + return iterator(__nd, this); +#else + return iterator(__nd); +#endif + } + } + } + return end(); +} + +template +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator +__hash_table<_Tp, _Hash, _Equal, _Alloc>::find(const _Key& __k) const +{ + size_t __hash = hash_function()(__k); + size_type __bc = bucket_count(); + if (__bc != 0) + { + size_t __chash = __constrain_hash(__hash, __bc); + __next_pointer __nd = __bucket_list_[__chash]; + if (__nd != nullptr) + { + for (__nd = __nd->__next_; __nd != nullptr && + (__hash == __nd->__hash() + || __constrain_hash(__nd->__hash(), __bc) == __chash); + __nd = __nd->__next_) + { + if ((__nd->__hash() == __hash) + && key_eq()(__nd->__upcast()->__value_, __k)) +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + return const_iterator(__nd, this); +#else + return const_iterator(__nd); +#endif + } + } + + } + return end(); +} + +#ifndef _LIBCUDACXX_CXX03_LANG + +template +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_holder +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__construct_node(_Args&& ...__args) +{ + static_assert(!__is_hash_value_type<_Args...>::value, + "Construct cannot be called with a hash value type"); + __node_allocator& __na = __node_alloc(); + __node_holder __h(__node_traits::allocate(__na, 1), _Dp(__na)); + __node_traits::construct(__na, _NodeTypes::__get_ptr(__h->__value_), _CUDA_VSTD::forward<_Args>(__args)...); + __h.get_deleter().__value_constructed = true; + __h->__hash_ = hash_function()(__h->__value_); + __h->__next_ = nullptr; + return __h; +} + +template +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_holder +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__construct_node_hash( + size_t __hash, _First&& __f, _Rest&& ...__rest) +{ + 
+    static_assert(!__is_hash_value_type<_First, _Rest...>::value,
+                  "Construct cannot be called with a hash value type");
+    __node_allocator& __na = __node_alloc();
+    __node_holder __h(__node_traits::allocate(__na, 1), _Dp(__na));
+    __node_traits::construct(__na, _NodeTypes::__get_ptr(__h->__value_),
+                             _CUDA_VSTD::forward<_First>(__f),
+                             _CUDA_VSTD::forward<_Rest>(__rest)...);
+    __h.get_deleter().__value_constructed = true;
+    __h->__hash_ = __hash;
+    __h->__next_ = nullptr;
+    return __h;
+}
+
+#else // _LIBCUDACXX_CXX03_LANG
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_holder
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__construct_node(const __container_value_type& __v)
+{
+    __node_allocator& __na = __node_alloc();
+    __node_holder __h(__node_traits::allocate(__na, 1), _Dp(__na));
+    __node_traits::construct(__na, _NodeTypes::__get_ptr(__h->__value_), __v);
+    __h.get_deleter().__value_constructed = true;
+    __h->__hash_ = hash_function()(__h->__value_);
+    __h->__next_ = nullptr;
+    return _LIBCUDACXX_EXPLICIT_MOVE(__h);  // explicitly moved for C++03
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_holder
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__construct_node_hash(size_t __hash,
+                                                                const __container_value_type& __v)
+{
+    __node_allocator& __na = __node_alloc();
+    __node_holder __h(__node_traits::allocate(__na, 1), _Dp(__na));
+    __node_traits::construct(__na, _NodeTypes::__get_ptr(__h->__value_), __v);
+    __h.get_deleter().__value_constructed = true;
+    __h->__hash_ = __hash;
+    __h->__next_ = nullptr;
+    return _LIBCUDACXX_EXPLICIT_MOVE(__h);  // explicitly moved for C++03
+}
+
+#endif // _LIBCUDACXX_CXX03_LANG
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::erase(const_iterator __p)
+{
+    __next_pointer __np = __p.__node_;
+#if _LIBCUDACXX_DEBUG_LEVEL >= 2
+    _LIBCUDACXX_ASSERT(__get_const_db()->__find_c_from_i(&__p) == this,
+        "unordered container erase(iterator) called with an iterator not"
+        " referring to this container");
+    _LIBCUDACXX_ASSERT(__p != end(),
+        "unordered container erase(iterator) called with a non-dereferenceable iterator");
+    iterator __r(__np, this);
+#else
+    iterator __r(__np);
+#endif
+    ++__r;
+    remove(__p);
+    return __r;
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::erase(const_iterator __first,
+                                                const_iterator __last)
+{
+#if _LIBCUDACXX_DEBUG_LEVEL >= 2
+    _LIBCUDACXX_ASSERT(__get_const_db()->__find_c_from_i(&__first) == this,
+        "unordered container::erase(iterator, iterator) called with an iterator not"
+        " referring to this unordered container");
+    _LIBCUDACXX_ASSERT(__get_const_db()->__find_c_from_i(&__last) == this,
+        "unordered container::erase(iterator, iterator) called with an iterator not"
+        " referring to this unordered container");
+#endif
+    for (const_iterator __p = __first; __first != __last; __p = __first)
+    {
+        ++__first;
+        erase(__p);
+    }
+    __next_pointer __np = __last.__node_;
+#if _LIBCUDACXX_DEBUG_LEVEL >= 2
+    return iterator (__np, this);
+#else
+    return iterator (__np);
+#endif
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+template <class _Key>
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::size_type
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__erase_unique(const _Key& __k)
+{
+    iterator __i = find(__k);
+    if (__i == end())
+        return 0;
+    erase(__i);
+    return 1;
+}
+
+template <class _Tp, class _Hash, class _Equal, class _Alloc>
+template <class _Key>
+typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::size_type
+__hash_table<_Tp, _Hash, _Equal, _Alloc>::__erase_multi(const
_Key& __k) +{ + size_type __r = 0; + iterator __i = find(__k); + if (__i != end()) + { + iterator __e = end(); + do + { + erase(__i++); + ++__r; + } while (__i != __e && key_eq()(*__i, __k)); + } + return __r; +} + +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::__node_holder +__hash_table<_Tp, _Hash, _Equal, _Alloc>::remove(const_iterator __p) _NOEXCEPT +{ + // current node + __next_pointer __cn = __p.__node_; + size_type __bc = bucket_count(); + size_t __chash = __constrain_hash(__cn->__hash(), __bc); + // find previous node + __next_pointer __pn = __bucket_list_[__chash]; + for (; __pn->__next_ != __cn; __pn = __pn->__next_) + ; + // Fix up __bucket_list_ + // if __pn is not in same bucket (before begin is not in same bucket) && + // if __cn->__next_ is not in same bucket (nullptr is not in same bucket) + if (__pn == __p1_.first().__ptr() + || __constrain_hash(__pn->__hash(), __bc) != __chash) + { + if (__cn->__next_ == nullptr + || __constrain_hash(__cn->__next_->__hash(), __bc) != __chash) + __bucket_list_[__chash] = nullptr; + } + // if __cn->__next_ is not in same bucket (nullptr is in same bucket) + if (__cn->__next_ != nullptr) + { + size_t __nhash = __constrain_hash(__cn->__next_->__hash(), __bc); + if (__nhash != __chash) + __bucket_list_[__nhash] = __pn; + } + // remove __cn + __pn->__next_ = __cn->__next_; + __cn->__next_ = nullptr; + --size(); +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + __c_node* __c = __get_db()->__find_c_and_lock(this); + for (__i_node** __dp = __c->end_; __dp != __c->beg_; ) + { + --__dp; + iterator* __i = static_cast((*__dp)->__i_); + if (__i->__node_ == __cn) + { + (*__dp)->__c_ = nullptr; + if (--__c->end_ != __dp) + memmove(__dp, __dp+1, (__c->end_ - __dp)*sizeof(__i_node*)); + } + } + __get_db()->unlock(); +#endif + return __node_holder(__cn->__upcast(), _Dp(__node_alloc(), true)); +} + +template +template +inline +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::size_type +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__count_unique(const _Key& __k) const +{ + return static_cast(find(__k) != end()); +} + +template +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::size_type +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__count_multi(const _Key& __k) const +{ + size_type __r = 0; + const_iterator __i = find(__k); + if (__i != end()) + { + const_iterator __e = end(); + do + { + ++__i; + ++__r; + } while (__i != __e && key_eq()(*__i, __k)); + } + return __r; +} + +template +template +pair::iterator, + typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator> +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__equal_range_unique( + const _Key& __k) +{ + iterator __i = find(__k); + iterator __j = __i; + if (__i != end()) + ++__j; + return pair(__i, __j); +} + +template +template +pair::const_iterator, + typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::const_iterator> +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__equal_range_unique( + const _Key& __k) const +{ + const_iterator __i = find(__k); + const_iterator __j = __i; + if (__i != end()) + ++__j; + return pair(__i, __j); +} + +template +template +pair::iterator, + typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::iterator> +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__equal_range_multi( + const _Key& __k) +{ + iterator __i = find(__k); + iterator __j = __i; + if (__i != end()) + { + iterator __e = end(); + do + { + ++__j; + } while (__j != __e && key_eq()(*__j, __k)); + } + return pair(__i, __j); +} + +template +template +pair::const_iterator, + typename __hash_table<_Tp, _Hash, _Equal, 
_Alloc>::const_iterator> +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__equal_range_multi( + const _Key& __k) const +{ + const_iterator __i = find(__k); + const_iterator __j = __i; + if (__i != end()) + { + const_iterator __e = end(); + do + { + ++__j; + } while (__j != __e && key_eq()(*__j, __k)); + } + return pair(__i, __j); +} + +template +void +__hash_table<_Tp, _Hash, _Equal, _Alloc>::swap(__hash_table& __u) +#if _LIBCUDACXX_STD_VER <= 11 + _NOEXCEPT_( + __is_nothrow_swappable::value && __is_nothrow_swappable::value + && (!allocator_traits<__pointer_allocator>::propagate_on_container_swap::value + || __is_nothrow_swappable<__pointer_allocator>::value) + && (!__node_traits::propagate_on_container_swap::value + || __is_nothrow_swappable<__node_allocator>::value) + ) +#else + _NOEXCEPT_(__is_nothrow_swappable::value && __is_nothrow_swappable::value) +#endif +{ + _LIBCUDACXX_ASSERT(__node_traits::propagate_on_container_swap::value || + this->__node_alloc() == __u.__node_alloc(), + "list::swap: Either propagate_on_container_swap must be true" + " or the allocators must compare equal"); + { + __node_pointer_pointer __npp = __bucket_list_.release(); + __bucket_list_.reset(__u.__bucket_list_.release()); + __u.__bucket_list_.reset(__npp); + } + _CUDA_VSTD::swap(__bucket_list_.get_deleter().size(), __u.__bucket_list_.get_deleter().size()); + __swap_allocator(__bucket_list_.get_deleter().__alloc(), + __u.__bucket_list_.get_deleter().__alloc()); + __swap_allocator(__node_alloc(), __u.__node_alloc()); + _CUDA_VSTD::swap(__p1_.first().__next_, __u.__p1_.first().__next_); + __p2_.swap(__u.__p2_); + __p3_.swap(__u.__p3_); + if (size() > 0) + __bucket_list_[__constrain_hash(__p1_.first().__next_->__hash(), bucket_count())] = + __p1_.first().__ptr(); + if (__u.size() > 0) + __u.__bucket_list_[__constrain_hash(__u.__p1_.first().__next_->__hash(), __u.bucket_count())] = + __u.__p1_.first().__ptr(); +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + __get_db()->swap(this, &__u); +#endif +} + +template +typename __hash_table<_Tp, _Hash, _Equal, _Alloc>::size_type +__hash_table<_Tp, _Hash, _Equal, _Alloc>::bucket_size(size_type __n) const +{ + _LIBCUDACXX_ASSERT(__n < bucket_count(), + "unordered container::bucket_size(n) called with n >= bucket_count()"); + __next_pointer __np = __bucket_list_[__n]; + size_type __bc = bucket_count(); + size_type __r = 0; + if (__np != nullptr) + { + for (__np = __np->__next_; __np != nullptr && + __constrain_hash(__np->__hash(), __bc) == __n; + __np = __np->__next_, ++__r) + ; + } + return __r; +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(__hash_table<_Tp, _Hash, _Equal, _Alloc>& __x, + __hash_table<_Tp, _Hash, _Equal, _Alloc>& __y) + _NOEXCEPT_(_NOEXCEPT_(__x.swap(__y))) +{ + __x.swap(__y); +} + +#if _LIBCUDACXX_DEBUG_LEVEL >= 2 + +template +bool +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__dereferenceable(const const_iterator* __i) const +{ + return __i->__node_ != nullptr; +} + +template +bool +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__decrementable(const const_iterator*) const +{ + return false; +} + +template +bool +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__addable(const const_iterator*, ptrdiff_t) const +{ + return false; +} + +template +bool +__hash_table<_Tp, _Hash, _Equal, _Alloc>::__subscriptable(const const_iterator*, ptrdiff_t) const +{ + return false; +} + +#endif // _LIBCUDACXX_DEBUG_LEVEL >= 2 + +_LIBCUDACXX_END_NAMESPACE_STD + +_LIBCUDACXX_POP_MACROS + +#endif // _LIBCUDACXX__HASH_TABLE diff --git 
a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__libcpp_version b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__libcpp_version new file mode 100644 index 000000000000..5caff40c4a0c --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__libcpp_version @@ -0,0 +1 @@ +10000 diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__locale b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__locale new file mode 100644 index 000000000000..2b2df18624ef --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__locale @@ -0,0 +1,1553 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___LOCALE +#define _LIBCUDACXX___LOCALE + +#include <__config> +#include +#include +#include +#include +#include +#include +#include +#if defined(_LIBCUDACXX_MSVCRT_LIKE) +# include +# include +#elif defined(_AIX) +# include +#elif defined(__ANDROID__) +# include +#elif defined(__sun__) +# include +# include +#elif defined(_NEWLIB_VERSION) +# include +#elif (defined(__APPLE__) || defined(__FreeBSD__) \ + || defined(__EMSCRIPTEN__) || defined(__IBMCPP__)) +# include +#elif defined(__Fuchsia__) +# include +#elif defined(__wasi__) +// WASI libc uses musl's locales support. +# include +#elif defined(_LIBCUDACXX_HAS_MUSL_LIBC) +# include +#endif + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +#if !defined(_LIBCUDACXX_LOCALE__L_EXTENSIONS) +struct __libcpp_locale_guard { + _LIBCUDACXX_INLINE_VISIBILITY + __libcpp_locale_guard(locale_t& __loc) : __old_loc_(uselocale(__loc)) {} + + _LIBCUDACXX_INLINE_VISIBILITY + ~__libcpp_locale_guard() { + if (__old_loc_) + uselocale(__old_loc_); + } + + locale_t __old_loc_; +private: + __libcpp_locale_guard(__libcpp_locale_guard const&); + __libcpp_locale_guard& operator=(__libcpp_locale_guard const&); +}; +#elif defined(_LIBCUDACXX_MSVCRT_LIKE) +struct __libcpp_locale_guard { + __libcpp_locale_guard(locale_t __l) : + __status(_configthreadlocale(_ENABLE_PER_THREAD_LOCALE)) { + // Setting the locale can be expensive even when the locale given is + // already the current locale, so do an explicit check to see if the + // current locale is already the one we want. + const char* __lc = __setlocale(nullptr); + // If every category is the same, the locale string will simply be the + // locale name, otherwise it will be a semicolon-separated string listing + // each category. In the second case, we know at least one category won't + // be what we want, so we only have to check the first case. 
+ if (strcmp(__l.__get_locale(), __lc) != 0) { + __locale_all = _strdup(__lc); + if (__locale_all == nullptr) + __throw_bad_alloc(); + __setlocale(__l.__get_locale()); + } + } + ~__libcpp_locale_guard() { + // The CRT documentation doesn't explicitly say, but setlocale() does the + // right thing when given a semicolon-separated list of locale settings + // for the different categories in the same format as returned by + // setlocale(LC_ALL, nullptr). + if (__locale_all != nullptr) { + __setlocale(__locale_all); + free(__locale_all); + } + _configthreadlocale(__status); + } + static const char* __setlocale(const char* __locale) { + const char* __new_locale = setlocale(LC_ALL, __locale); + if (__new_locale == nullptr) + __throw_bad_alloc(); + return __new_locale; + } + int __status; + char* __locale_all = nullptr; +}; +#endif + + +class _LIBCUDACXX_TYPE_VIS locale; + +template +_LIBCUDACXX_INLINE_VISIBILITY +bool +has_facet(const locale&) _NOEXCEPT; + +template +_LIBCUDACXX_INLINE_VISIBILITY +const _Facet& +use_facet(const locale&); + +class _LIBCUDACXX_TYPE_VIS locale +{ +public: + // types: + class _LIBCUDACXX_TYPE_VIS facet; + class _LIBCUDACXX_TYPE_VIS id; + + typedef int category; + _LIBCUDACXX_AVAILABILITY_LOCALE_CATEGORY + static const category // values assigned here are for exposition only + none = 0, + collate = LC_COLLATE_MASK, + ctype = LC_CTYPE_MASK, + monetary = LC_MONETARY_MASK, + numeric = LC_NUMERIC_MASK, + time = LC_TIME_MASK, + messages = LC_MESSAGES_MASK, + all = collate | ctype | monetary | numeric | time | messages; + + // construct/copy/destroy: + locale() _NOEXCEPT; + locale(const locale&) _NOEXCEPT; + explicit locale(const char*); + explicit locale(const string&); + locale(const locale&, const char*, category); + locale(const locale&, const string&, category); + template + _LIBCUDACXX_INLINE_VISIBILITY locale(const locale&, _Facet*); + locale(const locale&, const locale&, category); + + ~locale(); + + const locale& operator=(const locale&) _NOEXCEPT; + + template + _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS + locale combine(const locale&) const; + + // locale operations: + string name() const; + bool operator==(const locale&) const; + bool operator!=(const locale& __y) const {return !(*this == __y);} + template + _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS + bool operator()(const basic_string<_CharT, _Traits, _Allocator>&, + const basic_string<_CharT, _Traits, _Allocator>&) const; + + // global locale objects: + static locale global(const locale&); + static const locale& classic(); + +private: + class __imp; + __imp* __locale_; + + void __install_ctor(const locale&, facet*, long); + static locale& __global(); + bool has_facet(id&) const; + const facet* use_facet(id&) const; + + template friend bool has_facet(const locale&) _NOEXCEPT; + template friend const _Facet& use_facet(const locale&); +}; + +class _LIBCUDACXX_TYPE_VIS locale::facet + : public __shared_count +{ +protected: + _LIBCUDACXX_INLINE_VISIBILITY + explicit facet(size_t __refs = 0) + : __shared_count(static_cast(__refs)-1) {} + + virtual ~facet(); + +// facet(const facet&) = delete; // effectively done in __shared_count +// void operator=(const facet&) = delete; +private: + virtual void __on_zero_shared() _NOEXCEPT; +}; + +class _LIBCUDACXX_TYPE_VIS locale::id +{ + once_flag __flag_; + int32_t __id_; + + static int32_t __next_id; +public: + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR id() :__id_(0) {} +private: + void __init(); + void operator=(const id&); // = delete; + 
id(const id&); // = delete; +public: // only needed for tests + long __get(); + + friend class locale; + friend class locale::__imp; +}; + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +locale::locale(const locale& __other, _Facet* __f) +{ + __install_ctor(__other, __f, __f ? __f->id.__get() : 0); +} + +template +locale +locale::combine(const locale& __other) const +{ + if (!_CUDA_VSTD::has_facet<_Facet>(__other)) + __throw_runtime_error("locale::combine: locale missing facet"); + + return locale(*this, &const_cast<_Facet&>(_CUDA_VSTD::use_facet<_Facet>(__other))); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +has_facet(const locale& __l) _NOEXCEPT +{ + return __l.has_facet(_Facet::id); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +const _Facet& +use_facet(const locale& __l) +{ + return static_cast(*__l.use_facet(_Facet::id)); +} + +// template class collate; + +template +class _LIBCUDACXX_TEMPLATE_VIS collate + : public locale::facet +{ +public: + typedef _CharT char_type; + typedef basic_string string_type; + + _LIBCUDACXX_INLINE_VISIBILITY + explicit collate(size_t __refs = 0) + : locale::facet(__refs) {} + + _LIBCUDACXX_INLINE_VISIBILITY + int compare(const char_type* __lo1, const char_type* __hi1, + const char_type* __lo2, const char_type* __hi2) const + { + return do_compare(__lo1, __hi1, __lo2, __hi2); + } + + // FIXME(EricWF): The _LIBCUDACXX_ALWAYS_INLINE is needed on Windows to work + // around a dllimport bug that expects an external instantiation. + _LIBCUDACXX_INLINE_VISIBILITY + _LIBCUDACXX_ALWAYS_INLINE + string_type transform(const char_type* __lo, const char_type* __hi) const + { + return do_transform(__lo, __hi); + } + + _LIBCUDACXX_INLINE_VISIBILITY + long hash(const char_type* __lo, const char_type* __hi) const + { + return do_hash(__lo, __hi); + } + + static locale::id id; + +protected: + ~collate(); + virtual int do_compare(const char_type* __lo1, const char_type* __hi1, + const char_type* __lo2, const char_type* __hi2) const; + virtual string_type do_transform(const char_type* __lo, const char_type* __hi) const + {return string_type(__lo, __hi);} + virtual long do_hash(const char_type* __lo, const char_type* __hi) const; +}; + +template locale::id collate<_CharT>::id; + +template +collate<_CharT>::~collate() +{ +} + +template +int +collate<_CharT>::do_compare(const char_type* __lo1, const char_type* __hi1, + const char_type* __lo2, const char_type* __hi2) const +{ + for (; __lo2 != __hi2; ++__lo1, ++__lo2) + { + if (__lo1 == __hi1 || *__lo1 < *__lo2) + return -1; + if (*__lo2 < *__lo1) + return 1; + } + return __lo1 != __hi1; +} + +template +long +collate<_CharT>::do_hash(const char_type* __lo, const char_type* __hi) const +{ + size_t __h = 0; + const size_t __sr = __CHAR_BIT__ * sizeof(size_t) - 8; + const size_t __mask = size_t(0xF) << (__sr + 4); + for(const char_type* __p = __lo; __p != __hi; ++__p) + { + __h = (__h << 4) + static_cast(*__p); + size_t __g = __h & __mask; + __h ^= __g | (__g >> __sr); + } + return static_cast(__h); +} + +_LIBCUDACXX_EXTERN_TEMPLATE2(class _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS collate) +_LIBCUDACXX_EXTERN_TEMPLATE2(class _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS collate) + +// template class collate_byname; + +template class _LIBCUDACXX_TEMPLATE_VIS collate_byname; + +template <> +class _LIBCUDACXX_TYPE_VIS collate_byname + : public collate +{ + locale_t __l; +public: + typedef char char_type; + typedef basic_string string_type; + + explicit collate_byname(const char* __n, size_t __refs = 0); + explicit 
collate_byname(const string& __n, size_t __refs = 0); + +protected: + ~collate_byname(); + virtual int do_compare(const char_type* __lo1, const char_type* __hi1, + const char_type* __lo2, const char_type* __hi2) const; + virtual string_type do_transform(const char_type* __lo, const char_type* __hi) const; +}; + +template <> +class _LIBCUDACXX_TYPE_VIS collate_byname + : public collate +{ + locale_t __l; +public: + typedef wchar_t char_type; + typedef basic_string string_type; + + explicit collate_byname(const char* __n, size_t __refs = 0); + explicit collate_byname(const string& __n, size_t __refs = 0); + +protected: + ~collate_byname(); + + virtual int do_compare(const char_type* __lo1, const char_type* __hi1, + const char_type* __lo2, const char_type* __hi2) const; + virtual string_type do_transform(const char_type* __lo, const char_type* __hi) const; +}; + +template +bool +locale::operator()(const basic_string<_CharT, _Traits, _Allocator>& __x, + const basic_string<_CharT, _Traits, _Allocator>& __y) const +{ + return _CUDA_VSTD::use_facet<_CUDA_VSTD::collate<_CharT> >(*this).compare( + __x.data(), __x.data() + __x.size(), + __y.data(), __y.data() + __y.size()) < 0; +} + +// template class ctype + +class _LIBCUDACXX_TYPE_VIS ctype_base +{ +public: +#if defined(__GLIBC__) + typedef unsigned short mask; + static const mask space = _ISspace; + static const mask print = _ISprint; + static const mask cntrl = _IScntrl; + static const mask upper = _ISupper; + static const mask lower = _ISlower; + static const mask alpha = _ISalpha; + static const mask digit = _ISdigit; + static const mask punct = _ISpunct; + static const mask xdigit = _ISxdigit; + static const mask blank = _ISblank; +#if defined(__mips__) + static const mask __regex_word = static_cast(_ISbit(15)); +#else + static const mask __regex_word = 0x80; +#endif +#elif defined(_LIBCUDACXX_MSVCRT_LIKE) + typedef unsigned short mask; + static const mask space = _SPACE; + static const mask print = _BLANK|_PUNCT|_ALPHA|_DIGIT; + static const mask cntrl = _CONTROL; + static const mask upper = _UPPER; + static const mask lower = _LOWER; + static const mask alpha = _ALPHA; + static const mask digit = _DIGIT; + static const mask punct = _PUNCT; + static const mask xdigit = _HEX; + static const mask blank = _BLANK; + static const mask __regex_word = 0x80; +# define _LIBCUDACXX_CTYPE_MASK_IS_COMPOSITE_PRINT +#elif defined(__APPLE__) || defined(__FreeBSD__) || defined(__EMSCRIPTEN__) || defined(__NetBSD__) +# ifdef __APPLE__ + typedef __uint32_t mask; +# elif defined(__FreeBSD__) + typedef unsigned long mask; +# elif defined(__EMSCRIPTEN__) || defined(__NetBSD__) + typedef unsigned short mask; +# endif + static const mask space = _CTYPE_S; + static const mask print = _CTYPE_R; + static const mask cntrl = _CTYPE_C; + static const mask upper = _CTYPE_U; + static const mask lower = _CTYPE_L; + static const mask alpha = _CTYPE_A; + static const mask digit = _CTYPE_D; + static const mask punct = _CTYPE_P; + static const mask xdigit = _CTYPE_X; + +# if defined(__NetBSD__) + static const mask blank = _CTYPE_BL; + // NetBSD defines classes up to 0x2000 + // see sys/ctype_bits.h, _CTYPE_Q + static const mask __regex_word = 0x8000; +# else + static const mask blank = _CTYPE_B; + static const mask __regex_word = 0x80; +# endif +#elif defined(__sun__) || defined(_AIX) + typedef unsigned int mask; + static const mask space = _ISSPACE; + static const mask print = _ISPRINT; + static const mask cntrl = _ISCNTRL; + static const mask upper = _ISUPPER; + static const 
mask lower = _ISLOWER; + static const mask alpha = _ISALPHA; + static const mask digit = _ISDIGIT; + static const mask punct = _ISPUNCT; + static const mask xdigit = _ISXDIGIT; + static const mask blank = _ISBLANK; + static const mask __regex_word = 0x80; +#elif defined(_NEWLIB_VERSION) + // Same type as Newlib's _ctype_ array in newlib/libc/include/ctype.h. + typedef char mask; + static const mask space = _S; + static const mask print = _P | _U | _L | _N | _B; + static const mask cntrl = _C; + static const mask upper = _U; + static const mask lower = _L; + static const mask alpha = _U | _L; + static const mask digit = _N; + static const mask punct = _P; + static const mask xdigit = _X | _N; + static const mask blank = _B; + static const mask __regex_word = 0x80; +# define _LIBCUDACXX_CTYPE_MASK_IS_COMPOSITE_PRINT +# define _LIBCUDACXX_CTYPE_MASK_IS_COMPOSITE_ALPHA +# define _LIBCUDACXX_CTYPE_MASK_IS_COMPOSITE_XDIGIT +#else + typedef unsigned long mask; + static const mask space = 1<<0; + static const mask print = 1<<1; + static const mask cntrl = 1<<2; + static const mask upper = 1<<3; + static const mask lower = 1<<4; + static const mask alpha = 1<<5; + static const mask digit = 1<<6; + static const mask punct = 1<<7; + static const mask xdigit = 1<<8; + static const mask blank = 1<<9; + static const mask __regex_word = 1<<10; +#endif + static const mask alnum = alpha | digit; + static const mask graph = alnum | punct; + + _LIBCUDACXX_INLINE_VISIBILITY ctype_base() {} +}; + +template class _LIBCUDACXX_TEMPLATE_VIS ctype; + +template <> +class _LIBCUDACXX_TYPE_VIS ctype + : public locale::facet, + public ctype_base +{ +public: + typedef wchar_t char_type; + + _LIBCUDACXX_INLINE_VISIBILITY + explicit ctype(size_t __refs = 0) + : locale::facet(__refs) {} + + _LIBCUDACXX_INLINE_VISIBILITY + bool is(mask __m, char_type __c) const + { + return do_is(__m, __c); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* is(const char_type* __low, const char_type* __high, mask* __vec) const + { + return do_is(__low, __high, __vec); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* scan_is(mask __m, const char_type* __low, const char_type* __high) const + { + return do_scan_is(__m, __low, __high); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* scan_not(mask __m, const char_type* __low, const char_type* __high) const + { + return do_scan_not(__m, __low, __high); + } + + _LIBCUDACXX_INLINE_VISIBILITY + char_type toupper(char_type __c) const + { + return do_toupper(__c); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* toupper(char_type* __low, const char_type* __high) const + { + return do_toupper(__low, __high); + } + + _LIBCUDACXX_INLINE_VISIBILITY + char_type tolower(char_type __c) const + { + return do_tolower(__c); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* tolower(char_type* __low, const char_type* __high) const + { + return do_tolower(__low, __high); + } + + _LIBCUDACXX_INLINE_VISIBILITY + char_type widen(char __c) const + { + return do_widen(__c); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char* widen(const char* __low, const char* __high, char_type* __to) const + { + return do_widen(__low, __high, __to); + } + + _LIBCUDACXX_INLINE_VISIBILITY + char narrow(char_type __c, char __dfault) const + { + return do_narrow(__c, __dfault); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* narrow(const char_type* __low, const char_type* __high, char __dfault, char* __to) const + { + return do_narrow(__low, __high, __dfault, __to); + } + + static locale::id 
id; + +protected: + ~ctype(); + virtual bool do_is(mask __m, char_type __c) const; + virtual const char_type* do_is(const char_type* __low, const char_type* __high, mask* __vec) const; + virtual const char_type* do_scan_is(mask __m, const char_type* __low, const char_type* __high) const; + virtual const char_type* do_scan_not(mask __m, const char_type* __low, const char_type* __high) const; + virtual char_type do_toupper(char_type) const; + virtual const char_type* do_toupper(char_type* __low, const char_type* __high) const; + virtual char_type do_tolower(char_type) const; + virtual const char_type* do_tolower(char_type* __low, const char_type* __high) const; + virtual char_type do_widen(char) const; + virtual const char* do_widen(const char* __low, const char* __high, char_type* __dest) const; + virtual char do_narrow(char_type, char __dfault) const; + virtual const char_type* do_narrow(const char_type* __low, const char_type* __high, char __dfault, char* __dest) const; +}; + +template <> +class _LIBCUDACXX_TYPE_VIS ctype + : public locale::facet, public ctype_base +{ + const mask* __tab_; + bool __del_; +public: + typedef char char_type; + + explicit ctype(const mask* __tab = 0, bool __del = false, size_t __refs = 0); + + _LIBCUDACXX_INLINE_VISIBILITY + bool is(mask __m, char_type __c) const + { + return isascii(__c) ? (__tab_[static_cast(__c)] & __m) !=0 : false; + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* is(const char_type* __low, const char_type* __high, mask* __vec) const + { + for (; __low != __high; ++__low, ++__vec) + *__vec = isascii(*__low) ? __tab_[static_cast(*__low)] : 0; + return __low; + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* scan_is (mask __m, const char_type* __low, const char_type* __high) const + { + for (; __low != __high; ++__low) + if (isascii(*__low) && (__tab_[static_cast(*__low)] & __m)) + break; + return __low; + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* scan_not(mask __m, const char_type* __low, const char_type* __high) const + { + for (; __low != __high; ++__low) + if (!(isascii(*__low) && (__tab_[static_cast(*__low)] & __m))) + break; + return __low; + } + + _LIBCUDACXX_INLINE_VISIBILITY + char_type toupper(char_type __c) const + { + return do_toupper(__c); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* toupper(char_type* __low, const char_type* __high) const + { + return do_toupper(__low, __high); + } + + _LIBCUDACXX_INLINE_VISIBILITY + char_type tolower(char_type __c) const + { + return do_tolower(__c); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char_type* tolower(char_type* __low, const char_type* __high) const + { + return do_tolower(__low, __high); + } + + _LIBCUDACXX_INLINE_VISIBILITY + char_type widen(char __c) const + { + return do_widen(__c); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char* widen(const char* __low, const char* __high, char_type* __to) const + { + return do_widen(__low, __high, __to); + } + + _LIBCUDACXX_INLINE_VISIBILITY + char narrow(char_type __c, char __dfault) const + { + return do_narrow(__c, __dfault); + } + + _LIBCUDACXX_INLINE_VISIBILITY + const char* narrow(const char_type* __low, const char_type* __high, char __dfault, char* __to) const + { + return do_narrow(__low, __high, __dfault, __to); + } + + static locale::id id; + +#ifdef _CACHED_RUNES + static const size_t table_size = _CACHED_RUNES; +#else + static const size_t table_size = 256; // FIXME: Don't hardcode this. 
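+    // The table() machinery above classifies characters with one mask lookup
+    // per character: each of the 256 slots carries an OR of classification
+    // bits, so is() reduces to an index plus a bitwise AND, which is why the
+    // table size matters. A hedged, self-contained sketch of the same idea,
+    // kept out of the build with #if 0; classify_digits and kDigit are
+    // hypothetical names, not part of this header.
+#if 0
+    enum : unsigned short { kDigit = 1 << 0 };
+    static int classify_digits(const unsigned short (&__table)[256], const char* __s)
+    {
+        int __n = 0;
+        for (; *__s; ++__s)
+            __n += (__table[static_cast<unsigned char>(*__s)] & kDigit) != 0;
+        return __n;
+    }
+#endif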
+#endif + _LIBCUDACXX_INLINE_VISIBILITY const mask* table() const _NOEXCEPT {return __tab_;} + static const mask* classic_table() _NOEXCEPT; +#if defined(__GLIBC__) || defined(__EMSCRIPTEN__) + static const int* __classic_upper_table() _NOEXCEPT; + static const int* __classic_lower_table() _NOEXCEPT; +#endif +#if defined(__NetBSD__) + static const short* __classic_upper_table() _NOEXCEPT; + static const short* __classic_lower_table() _NOEXCEPT; +#endif + +protected: + ~ctype(); + virtual char_type do_toupper(char_type __c) const; + virtual const char_type* do_toupper(char_type* __low, const char_type* __high) const; + virtual char_type do_tolower(char_type __c) const; + virtual const char_type* do_tolower(char_type* __low, const char_type* __high) const; + virtual char_type do_widen(char __c) const; + virtual const char* do_widen(const char* __low, const char* __high, char_type* __to) const; + virtual char do_narrow(char_type __c, char __dfault) const; + virtual const char* do_narrow(const char_type* __low, const char_type* __high, char __dfault, char* __to) const; +}; + +// template class ctype_byname; + +template class _LIBCUDACXX_TEMPLATE_VIS ctype_byname; + +template <> +class _LIBCUDACXX_TYPE_VIS ctype_byname + : public ctype +{ + locale_t __l; + +public: + explicit ctype_byname(const char*, size_t = 0); + explicit ctype_byname(const string&, size_t = 0); + +protected: + ~ctype_byname(); + virtual char_type do_toupper(char_type) const; + virtual const char_type* do_toupper(char_type* __low, const char_type* __high) const; + virtual char_type do_tolower(char_type) const; + virtual const char_type* do_tolower(char_type* __low, const char_type* __high) const; +}; + +template <> +class _LIBCUDACXX_TYPE_VIS ctype_byname + : public ctype +{ + locale_t __l; + +public: + explicit ctype_byname(const char*, size_t = 0); + explicit ctype_byname(const string&, size_t = 0); + +protected: + ~ctype_byname(); + virtual bool do_is(mask __m, char_type __c) const; + virtual const char_type* do_is(const char_type* __low, const char_type* __high, mask* __vec) const; + virtual const char_type* do_scan_is(mask __m, const char_type* __low, const char_type* __high) const; + virtual const char_type* do_scan_not(mask __m, const char_type* __low, const char_type* __high) const; + virtual char_type do_toupper(char_type) const; + virtual const char_type* do_toupper(char_type* __low, const char_type* __high) const; + virtual char_type do_tolower(char_type) const; + virtual const char_type* do_tolower(char_type* __low, const char_type* __high) const; + virtual char_type do_widen(char) const; + virtual const char* do_widen(const char* __low, const char* __high, char_type* __dest) const; + virtual char do_narrow(char_type, char __dfault) const; + virtual const char_type* do_narrow(const char_type* __low, const char_type* __high, char __dfault, char* __dest) const; +}; + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +isspace(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::space, __c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +isprint(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::print, __c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +iscntrl(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::cntrl, __c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +isupper(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::upper, __c); +} + +template +inline 
_LIBCUDACXX_INLINE_VISIBILITY +bool +islower(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::lower, __c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +isalpha(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::alpha, __c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +isdigit(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::digit, __c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +ispunct(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::punct, __c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +isxdigit(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::xdigit, __c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +isalnum(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::alnum, __c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +isgraph(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).is(ctype_base::graph, __c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +_CharT +toupper(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).toupper(__c); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +_CharT +tolower(_CharT __c, const locale& __loc) +{ + return use_facet >(__loc).tolower(__c); +} + +// codecvt_base + +class _LIBCUDACXX_TYPE_VIS codecvt_base +{ +public: + _LIBCUDACXX_INLINE_VISIBILITY codecvt_base() {} + enum result {ok, partial, error, noconv}; +}; + +// template class codecvt; + +template class _LIBCUDACXX_TEMPLATE_VIS codecvt; + +// template <> class codecvt + +template <> +class _LIBCUDACXX_TYPE_VIS codecvt + : public locale::facet, + public codecvt_base +{ +public: + typedef char intern_type; + typedef char extern_type; + typedef mbstate_t state_type; + + _LIBCUDACXX_INLINE_VISIBILITY + explicit codecvt(size_t __refs = 0) + : locale::facet(__refs) {} + + _LIBCUDACXX_INLINE_VISIBILITY + result out(state_type& __st, + const intern_type* __frm, const intern_type* __frm_end, const intern_type*& __frm_nxt, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const + { + return do_out(__st, __frm, __frm_end, __frm_nxt, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + result unshift(state_type& __st, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const + { + return do_unshift(__st, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + result in(state_type& __st, + const extern_type* __frm, const extern_type* __frm_end, const extern_type*& __frm_nxt, + intern_type* __to, intern_type* __to_end, intern_type*& __to_nxt) const + { + return do_in(__st, __frm, __frm_end, __frm_nxt, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int encoding() const _NOEXCEPT + { + return do_encoding(); + } + + _LIBCUDACXX_INLINE_VISIBILITY + bool always_noconv() const _NOEXCEPT + { + return do_always_noconv(); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int length(state_type& __st, const extern_type* __frm, const extern_type* __end, size_t __mx) const + { + return do_length(__st, __frm, __end, __mx); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int max_length() const _NOEXCEPT + { + return do_max_length(); + } + + static locale::id id; + +protected: + _LIBCUDACXX_INLINE_VISIBILITY + explicit codecvt(const char*, size_t __refs = 0) + : locale::facet(__refs) {} + + ~codecvt(); + + virtual result do_out(state_type& __st, + const intern_type* 
__frm, const intern_type* __frm_end, const intern_type*& __frm_nxt, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const; + virtual result do_in(state_type& __st, + const extern_type* __frm, const extern_type* __frm_end, const extern_type*& __frm_nxt, + intern_type* __to, intern_type* __to_end, intern_type*& __to_nxt) const; + virtual result do_unshift(state_type& __st, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const; + virtual int do_encoding() const _NOEXCEPT; + virtual bool do_always_noconv() const _NOEXCEPT; + virtual int do_length(state_type& __st, const extern_type* __frm, const extern_type* __end, size_t __mx) const; + virtual int do_max_length() const _NOEXCEPT; +}; + +// template <> class codecvt + +template <> +class _LIBCUDACXX_TYPE_VIS codecvt + : public locale::facet, + public codecvt_base +{ + locale_t __l; +public: + typedef wchar_t intern_type; + typedef char extern_type; + typedef mbstate_t state_type; + + explicit codecvt(size_t __refs = 0); + + _LIBCUDACXX_INLINE_VISIBILITY + result out(state_type& __st, + const intern_type* __frm, const intern_type* __frm_end, const intern_type*& __frm_nxt, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const + { + return do_out(__st, __frm, __frm_end, __frm_nxt, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + result unshift(state_type& __st, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const + { + return do_unshift(__st, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + result in(state_type& __st, + const extern_type* __frm, const extern_type* __frm_end, const extern_type*& __frm_nxt, + intern_type* __to, intern_type* __to_end, intern_type*& __to_nxt) const + { + return do_in(__st, __frm, __frm_end, __frm_nxt, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int encoding() const _NOEXCEPT + { + return do_encoding(); + } + + _LIBCUDACXX_INLINE_VISIBILITY + bool always_noconv() const _NOEXCEPT + { + return do_always_noconv(); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int length(state_type& __st, const extern_type* __frm, const extern_type* __end, size_t __mx) const + { + return do_length(__st, __frm, __end, __mx); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int max_length() const _NOEXCEPT + { + return do_max_length(); + } + + static locale::id id; + +protected: + explicit codecvt(const char*, size_t __refs = 0); + + ~codecvt(); + + virtual result do_out(state_type& __st, + const intern_type* __frm, const intern_type* __frm_end, const intern_type*& __frm_nxt, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const; + virtual result do_in(state_type& __st, + const extern_type* __frm, const extern_type* __frm_end, const extern_type*& __frm_nxt, + intern_type* __to, intern_type* __to_end, intern_type*& __to_nxt) const; + virtual result do_unshift(state_type& __st, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const; + virtual int do_encoding() const _NOEXCEPT; + virtual bool do_always_noconv() const _NOEXCEPT; + virtual int do_length(state_type&, const extern_type* __frm, const extern_type* __end, size_t __mx) const; + virtual int do_max_length() const _NOEXCEPT; +}; + +// template <> class codecvt + +template <> +class _LIBCUDACXX_TYPE_VIS codecvt + : public locale::facet, + public codecvt_base +{ +public: + typedef char16_t intern_type; + typedef char extern_type; + typedef mbstate_t state_type; + + _LIBCUDACXX_INLINE_VISIBILITY + explicit codecvt(size_t __refs = 
0) + : locale::facet(__refs) {} + + _LIBCUDACXX_INLINE_VISIBILITY + result out(state_type& __st, + const intern_type* __frm, const intern_type* __frm_end, const intern_type*& __frm_nxt, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const + { + return do_out(__st, __frm, __frm_end, __frm_nxt, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + result unshift(state_type& __st, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const + { + return do_unshift(__st, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + result in(state_type& __st, + const extern_type* __frm, const extern_type* __frm_end, const extern_type*& __frm_nxt, + intern_type* __to, intern_type* __to_end, intern_type*& __to_nxt) const + { + return do_in(__st, __frm, __frm_end, __frm_nxt, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int encoding() const _NOEXCEPT + { + return do_encoding(); + } + + _LIBCUDACXX_INLINE_VISIBILITY + bool always_noconv() const _NOEXCEPT + { + return do_always_noconv(); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int length(state_type& __st, const extern_type* __frm, const extern_type* __end, size_t __mx) const + { + return do_length(__st, __frm, __end, __mx); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int max_length() const _NOEXCEPT + { + return do_max_length(); + } + + static locale::id id; + +protected: + _LIBCUDACXX_INLINE_VISIBILITY + explicit codecvt(const char*, size_t __refs = 0) + : locale::facet(__refs) {} + + ~codecvt(); + + virtual result do_out(state_type& __st, + const intern_type* __frm, const intern_type* __frm_end, const intern_type*& __frm_nxt, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const; + virtual result do_in(state_type& __st, + const extern_type* __frm, const extern_type* __frm_end, const extern_type*& __frm_nxt, + intern_type* __to, intern_type* __to_end, intern_type*& __to_nxt) const; + virtual result do_unshift(state_type& __st, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const; + virtual int do_encoding() const _NOEXCEPT; + virtual bool do_always_noconv() const _NOEXCEPT; + virtual int do_length(state_type&, const extern_type* __frm, const extern_type* __end, size_t __mx) const; + virtual int do_max_length() const _NOEXCEPT; +}; + +// template <> class codecvt + +template <> +class _LIBCUDACXX_TYPE_VIS codecvt + : public locale::facet, + public codecvt_base +{ +public: + typedef char32_t intern_type; + typedef char extern_type; + typedef mbstate_t state_type; + + _LIBCUDACXX_INLINE_VISIBILITY + explicit codecvt(size_t __refs = 0) + : locale::facet(__refs) {} + + _LIBCUDACXX_INLINE_VISIBILITY + result out(state_type& __st, + const intern_type* __frm, const intern_type* __frm_end, const intern_type*& __frm_nxt, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const + { + return do_out(__st, __frm, __frm_end, __frm_nxt, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + result unshift(state_type& __st, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const + { + return do_unshift(__st, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + result in(state_type& __st, + const extern_type* __frm, const extern_type* __frm_end, const extern_type*& __frm_nxt, + intern_type* __to, intern_type* __to_end, intern_type*& __to_nxt) const + { + return do_in(__st, __frm, __frm_end, __frm_nxt, __to, __to_end, __to_nxt); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int encoding() const _NOEXCEPT 
+ { + return do_encoding(); + } + + _LIBCUDACXX_INLINE_VISIBILITY + bool always_noconv() const _NOEXCEPT + { + return do_always_noconv(); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int length(state_type& __st, const extern_type* __frm, const extern_type* __end, size_t __mx) const + { + return do_length(__st, __frm, __end, __mx); + } + + _LIBCUDACXX_INLINE_VISIBILITY + int max_length() const _NOEXCEPT + { + return do_max_length(); + } + + static locale::id id; + +protected: + _LIBCUDACXX_INLINE_VISIBILITY + explicit codecvt(const char*, size_t __refs = 0) + : locale::facet(__refs) {} + + ~codecvt(); + + virtual result do_out(state_type& __st, + const intern_type* __frm, const intern_type* __frm_end, const intern_type*& __frm_nxt, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const; + virtual result do_in(state_type& __st, + const extern_type* __frm, const extern_type* __frm_end, const extern_type*& __frm_nxt, + intern_type* __to, intern_type* __to_end, intern_type*& __to_nxt) const; + virtual result do_unshift(state_type& __st, + extern_type* __to, extern_type* __to_end, extern_type*& __to_nxt) const; + virtual int do_encoding() const _NOEXCEPT; + virtual bool do_always_noconv() const _NOEXCEPT; + virtual int do_length(state_type&, const extern_type* __frm, const extern_type* __end, size_t __mx) const; + virtual int do_max_length() const _NOEXCEPT; +}; + +// template class codecvt_byname + +template +class _LIBCUDACXX_TEMPLATE_VIS codecvt_byname + : public codecvt<_InternT, _ExternT, _StateT> +{ +public: + _LIBCUDACXX_INLINE_VISIBILITY + explicit codecvt_byname(const char* __nm, size_t __refs = 0) + : codecvt<_InternT, _ExternT, _StateT>(__nm, __refs) {} + _LIBCUDACXX_INLINE_VISIBILITY + explicit codecvt_byname(const string& __nm, size_t __refs = 0) + : codecvt<_InternT, _ExternT, _StateT>(__nm.c_str(), __refs) {} +protected: + ~codecvt_byname(); +}; + +template +codecvt_byname<_InternT, _ExternT, _StateT>::~codecvt_byname() +{ +} + +_LIBCUDACXX_EXTERN_TEMPLATE2(class _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS codecvt_byname) +_LIBCUDACXX_EXTERN_TEMPLATE2(class _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS codecvt_byname) +_LIBCUDACXX_EXTERN_TEMPLATE2(class _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS codecvt_byname) +_LIBCUDACXX_EXTERN_TEMPLATE2(class _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS codecvt_byname) + +template +struct __narrow_to_utf8 +{ + template + _OutputIterator + operator()(_OutputIterator __s, const _CharT* __wb, const _CharT* __we) const; +}; + +template <> +struct __narrow_to_utf8<8> +{ + template + _LIBCUDACXX_INLINE_VISIBILITY + _OutputIterator + operator()(_OutputIterator __s, const _CharT* __wb, const _CharT* __we) const + { + for (; __wb < __we; ++__wb, ++__s) + *__s = *__wb; + return __s; + } +}; + +template <> +struct _LIBCUDACXX_TEMPLATE_VIS __narrow_to_utf8<16> + : public codecvt +{ + _LIBCUDACXX_INLINE_VISIBILITY + __narrow_to_utf8() : codecvt(1) {} + + _LIBCUDACXX_EXPORTED_FROM_ABI ~__narrow_to_utf8(); + + template + _LIBCUDACXX_INLINE_VISIBILITY + _OutputIterator + operator()(_OutputIterator __s, const _CharT* __wb, const _CharT* __we) const + { + result __r = ok; + mbstate_t __mb; + while (__wb < __we && __r != error) + { + const int __sz = 32; + char __buf[__sz]; + char* __bn; + const char16_t* __wn = (const char16_t*)__wb; + __r = do_out(__mb, (const char16_t*)__wb, (const char16_t*)__we, __wn, + __buf, __buf+__sz, __bn); + if (__r == codecvt_base::error || __wn == (const char16_t*)__wb) + __throw_runtime_error("locale not supported"); + for (const char* __p = 
__buf; __p < __bn; ++__p, ++__s) + *__s = *__p; + __wb = (const _CharT*)__wn; + } + return __s; + } +}; + +template <> +struct _LIBCUDACXX_TEMPLATE_VIS __narrow_to_utf8<32> + : public codecvt +{ + _LIBCUDACXX_INLINE_VISIBILITY + __narrow_to_utf8() : codecvt(1) {} + + _LIBCUDACXX_EXPORTED_FROM_ABI ~__narrow_to_utf8(); + + template + _LIBCUDACXX_INLINE_VISIBILITY + _OutputIterator + operator()(_OutputIterator __s, const _CharT* __wb, const _CharT* __we) const + { + result __r = ok; + mbstate_t __mb; + while (__wb < __we && __r != error) + { + const int __sz = 32; + char __buf[__sz]; + char* __bn; + const char32_t* __wn = (const char32_t*)__wb; + __r = do_out(__mb, (const char32_t*)__wb, (const char32_t*)__we, __wn, + __buf, __buf+__sz, __bn); + if (__r == codecvt_base::error || __wn == (const char32_t*)__wb) + __throw_runtime_error("locale not supported"); + for (const char* __p = __buf; __p < __bn; ++__p, ++__s) + *__s = *__p; + __wb = (const _CharT*)__wn; + } + return __s; + } +}; + +template +struct __widen_from_utf8 +{ + template + _OutputIterator + operator()(_OutputIterator __s, const char* __nb, const char* __ne) const; +}; + +template <> +struct __widen_from_utf8<8> +{ + template + _LIBCUDACXX_INLINE_VISIBILITY + _OutputIterator + operator()(_OutputIterator __s, const char* __nb, const char* __ne) const + { + for (; __nb < __ne; ++__nb, ++__s) + *__s = *__nb; + return __s; + } +}; + +template <> +struct _LIBCUDACXX_TEMPLATE_VIS __widen_from_utf8<16> + : public codecvt +{ + _LIBCUDACXX_INLINE_VISIBILITY + __widen_from_utf8() : codecvt(1) {} + + _LIBCUDACXX_EXPORTED_FROM_ABI ~__widen_from_utf8(); + + template + _LIBCUDACXX_INLINE_VISIBILITY + _OutputIterator + operator()(_OutputIterator __s, const char* __nb, const char* __ne) const + { + result __r = ok; + mbstate_t __mb; + while (__nb < __ne && __r != error) + { + const int __sz = 32; + char16_t __buf[__sz]; + char16_t* __bn; + const char* __nn = __nb; + __r = do_in(__mb, __nb, __ne - __nb > __sz ? __nb+__sz : __ne, __nn, + __buf, __buf+__sz, __bn); + if (__r == codecvt_base::error || __nn == __nb) + __throw_runtime_error("locale not supported"); + for (const char16_t* __p = __buf; __p < __bn; ++__p, ++__s) + *__s = (wchar_t)*__p; + __nb = __nn; + } + return __s; + } +}; + +template <> +struct _LIBCUDACXX_TEMPLATE_VIS __widen_from_utf8<32> + : public codecvt +{ + _LIBCUDACXX_INLINE_VISIBILITY + __widen_from_utf8() : codecvt(1) {} + + _LIBCUDACXX_EXPORTED_FROM_ABI ~__widen_from_utf8(); + + template + _LIBCUDACXX_INLINE_VISIBILITY + _OutputIterator + operator()(_OutputIterator __s, const char* __nb, const char* __ne) const + { + result __r = ok; + mbstate_t __mb; + while (__nb < __ne && __r != error) + { + const int __sz = 32; + char32_t __buf[__sz]; + char32_t* __bn; + const char* __nn = __nb; + __r = do_in(__mb, __nb, __ne - __nb > __sz ? 
__nb+__sz : __ne, __nn, + __buf, __buf+__sz, __bn); + if (__r == codecvt_base::error || __nn == __nb) + __throw_runtime_error("locale not supported"); + for (const char32_t* __p = __buf; __p < __bn; ++__p, ++__s) + *__s = (wchar_t)*__p; + __nb = __nn; + } + return __s; + } +}; + +// template class numpunct + +template class _LIBCUDACXX_TEMPLATE_VIS numpunct; + +template <> +class _LIBCUDACXX_TYPE_VIS numpunct + : public locale::facet +{ +public: + typedef char char_type; + typedef basic_string string_type; + + explicit numpunct(size_t __refs = 0); + + _LIBCUDACXX_INLINE_VISIBILITY char_type decimal_point() const {return do_decimal_point();} + _LIBCUDACXX_INLINE_VISIBILITY char_type thousands_sep() const {return do_thousands_sep();} + _LIBCUDACXX_INLINE_VISIBILITY string grouping() const {return do_grouping();} + _LIBCUDACXX_INLINE_VISIBILITY string_type truename() const {return do_truename();} + _LIBCUDACXX_INLINE_VISIBILITY string_type falsename() const {return do_falsename();} + + static locale::id id; + +protected: + ~numpunct(); + virtual char_type do_decimal_point() const; + virtual char_type do_thousands_sep() const; + virtual string do_grouping() const; + virtual string_type do_truename() const; + virtual string_type do_falsename() const; + + char_type __decimal_point_; + char_type __thousands_sep_; + string __grouping_; +}; + +template <> +class _LIBCUDACXX_TYPE_VIS numpunct + : public locale::facet +{ +public: + typedef wchar_t char_type; + typedef basic_string string_type; + + explicit numpunct(size_t __refs = 0); + + _LIBCUDACXX_INLINE_VISIBILITY char_type decimal_point() const {return do_decimal_point();} + _LIBCUDACXX_INLINE_VISIBILITY char_type thousands_sep() const {return do_thousands_sep();} + _LIBCUDACXX_INLINE_VISIBILITY string grouping() const {return do_grouping();} + _LIBCUDACXX_INLINE_VISIBILITY string_type truename() const {return do_truename();} + _LIBCUDACXX_INLINE_VISIBILITY string_type falsename() const {return do_falsename();} + + static locale::id id; + +protected: + ~numpunct(); + virtual char_type do_decimal_point() const; + virtual char_type do_thousands_sep() const; + virtual string do_grouping() const; + virtual string_type do_truename() const; + virtual string_type do_falsename() const; + + char_type __decimal_point_; + char_type __thousands_sep_; + string __grouping_; +}; + +// template class numpunct_byname + +template class _LIBCUDACXX_TEMPLATE_VIS numpunct_byname; + +template <> +class _LIBCUDACXX_TYPE_VIS numpunct_byname +: public numpunct +{ +public: + typedef char char_type; + typedef basic_string string_type; + + explicit numpunct_byname(const char* __nm, size_t __refs = 0); + explicit numpunct_byname(const string& __nm, size_t __refs = 0); + +protected: + ~numpunct_byname(); + +private: + void __init(const char*); +}; + +template <> +class _LIBCUDACXX_TYPE_VIS numpunct_byname +: public numpunct +{ +public: + typedef wchar_t char_type; + typedef basic_string string_type; + + explicit numpunct_byname(const char* __nm, size_t __refs = 0); + explicit numpunct_byname(const string& __nm, size_t __refs = 0); + +protected: + ~numpunct_byname(); + +private: + void __init(const char*); +}; + +_LIBCUDACXX_END_NAMESPACE_STD + +#endif // _LIBCUDACXX___LOCALE diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__mutex_base b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__mutex_base new file mode 100644 index 000000000000..bae7c7c210c3 --- 
/dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__mutex_base @@ -0,0 +1,541 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___MUTEX_BASE +#define _LIBCUDACXX___MUTEX_BASE + +#include <__config> +#include +#include +#include <__threading_support> + +#include + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_PUSH_MACROS +#include <__undef_macros> + + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +#ifndef _LIBCUDACXX_HAS_NO_THREADS + +#ifndef _LIBCUDACXX_THREAD_SAFETY_ANNOTATION +# ifdef _LIBCUDACXX_HAS_THREAD_SAFETY_ANNOTATIONS +# define _LIBCUDACXX_THREAD_SAFETY_ANNOTATION(x) __attribute__((x)) +# else +# define _LIBCUDACXX_THREAD_SAFETY_ANNOTATION(x) +# endif +#endif // _LIBCUDACXX_THREAD_SAFETY_ANNOTATION + + +class _LIBCUDACXX_TYPE_VIS _LIBCUDACXX_THREAD_SAFETY_ANNOTATION(capability("mutex")) mutex +{ + __libcpp_mutex_t __m_ = _LIBCUDACXX_MUTEX_INITIALIZER; + +public: + _LIBCUDACXX_INLINE_VISIBILITY + _LIBCUDACXX_CONSTEXPR mutex() = default; + + mutex(const mutex&) = delete; + mutex& operator=(const mutex&) = delete; + +#if defined(_LIBCUDACXX_HAS_TRIVIAL_MUTEX_DESTRUCTION) + ~mutex() = default; +#else + ~mutex() _NOEXCEPT; +#endif + + void lock() _LIBCUDACXX_THREAD_SAFETY_ANNOTATION(acquire_capability()); + bool try_lock() _NOEXCEPT _LIBCUDACXX_THREAD_SAFETY_ANNOTATION(try_acquire_capability(true)); + void unlock() _NOEXCEPT _LIBCUDACXX_THREAD_SAFETY_ANNOTATION(release_capability()); + + typedef __libcpp_mutex_t* native_handle_type; + _LIBCUDACXX_INLINE_VISIBILITY native_handle_type native_handle() {return &__m_;} +}; + +static_assert(is_nothrow_default_constructible::value, + "the default constructor for std::mutex must be nothrow"); + +struct _LIBCUDACXX_TYPE_VIS defer_lock_t { explicit defer_lock_t() = default; }; +struct _LIBCUDACXX_TYPE_VIS try_to_lock_t { explicit try_to_lock_t() = default; }; +struct _LIBCUDACXX_TYPE_VIS adopt_lock_t { explicit adopt_lock_t() = default; }; + +#if defined(_LIBCUDACXX_CXX03_LANG) || defined(_LIBCUDACXX_BUILDING_LIBRARY) + +extern _LIBCUDACXX_EXPORTED_FROM_ABI const defer_lock_t defer_lock; +extern _LIBCUDACXX_EXPORTED_FROM_ABI const try_to_lock_t try_to_lock; +extern _LIBCUDACXX_EXPORTED_FROM_ABI const adopt_lock_t adopt_lock; + +#else + +/* _LIBCUDACXX_INLINE_VAR */ constexpr defer_lock_t defer_lock = defer_lock_t(); +/* _LIBCUDACXX_INLINE_VAR */ constexpr try_to_lock_t try_to_lock = try_to_lock_t(); +/* _LIBCUDACXX_INLINE_VAR */ constexpr adopt_lock_t adopt_lock = adopt_lock_t(); + +#endif + +template +class _LIBCUDACXX_TEMPLATE_VIS _LIBCUDACXX_THREAD_SAFETY_ANNOTATION(scoped_lockable) +lock_guard +{ +public: + typedef _Mutex mutex_type; + +private: + mutex_type& __m_; +public: + + _LIBCUDACXX_NODISCARD_EXT _LIBCUDACXX_INLINE_VISIBILITY + explicit lock_guard(mutex_type& __m) _LIBCUDACXX_THREAD_SAFETY_ANNOTATION(acquire_capability(__m)) + : __m_(__m) {__m_.lock();} + + _LIBCUDACXX_NODISCARD_EXT _LIBCUDACXX_INLINE_VISIBILITY + lock_guard(mutex_type& __m, adopt_lock_t) _LIBCUDACXX_THREAD_SAFETY_ANNOTATION(requires_capability(__m)) + : __m_(__m) {} + 
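For reference, the two lock_guard constructors above map onto the standard usage pattern: the one-argument form locks the mutex itself, while the adopt_lock_t form takes ownership of a mutex that is already locked and only unlocks it on destruction. A minimal sketch against the standard <mutex> names that this header mirrors (the function names here are illustrative, not part of the patch):

    #include <mutex>

    std::mutex m;
    int counter = 0;

    void increment_scoped() {
        std::lock_guard<std::mutex> g(m);  // locks in the ctor, unlocks in the dtor
        ++counter;
    }

    void increment_adopted() {
        m.lock();                                           // already locked here
        std::lock_guard<std::mutex> g(m, std::adopt_lock);  // guard only unlocks
        ++counter;
    }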
_LIBCUDACXX_INLINE_VISIBILITY + ~lock_guard() _LIBCUDACXX_THREAD_SAFETY_ANNOTATION(release_capability()) {__m_.unlock();} + +private: + lock_guard(lock_guard const&) _LIBCUDACXX_EQUAL_DELETE; + lock_guard& operator=(lock_guard const&) _LIBCUDACXX_EQUAL_DELETE; +}; + +template +class _LIBCUDACXX_TEMPLATE_VIS unique_lock +{ +public: + typedef _Mutex mutex_type; + +private: + mutex_type* __m_; + bool __owns_; + +public: + _LIBCUDACXX_INLINE_VISIBILITY + unique_lock() _NOEXCEPT : __m_(nullptr), __owns_(false) {} + _LIBCUDACXX_INLINE_VISIBILITY + explicit unique_lock(mutex_type& __m) + : __m_(_CUDA_VSTD::addressof(__m)), __owns_(true) {__m_->lock();} + _LIBCUDACXX_INLINE_VISIBILITY + unique_lock(mutex_type& __m, defer_lock_t) _NOEXCEPT + : __m_(_CUDA_VSTD::addressof(__m)), __owns_(false) {} + _LIBCUDACXX_INLINE_VISIBILITY + unique_lock(mutex_type& __m, try_to_lock_t) + : __m_(_CUDA_VSTD::addressof(__m)), __owns_(__m.try_lock()) {} + _LIBCUDACXX_INLINE_VISIBILITY + unique_lock(mutex_type& __m, adopt_lock_t) + : __m_(_CUDA_VSTD::addressof(__m)), __owns_(true) {} + template + _LIBCUDACXX_INLINE_VISIBILITY + unique_lock(mutex_type& __m, const chrono::time_point<_Clock, _Duration>& __t) + : __m_(_CUDA_VSTD::addressof(__m)), __owns_(__m.try_lock_until(__t)) {} + template + _LIBCUDACXX_INLINE_VISIBILITY + unique_lock(mutex_type& __m, const chrono::duration<_Rep, _Period>& __d) + : __m_(_CUDA_VSTD::addressof(__m)), __owns_(__m.try_lock_for(__d)) {} + _LIBCUDACXX_INLINE_VISIBILITY + ~unique_lock() + { + if (__owns_) + __m_->unlock(); + } + +private: + unique_lock(unique_lock const&); // = delete; + unique_lock& operator=(unique_lock const&); // = delete; + +public: +#ifndef _LIBCUDACXX_CXX03_LANG + _LIBCUDACXX_INLINE_VISIBILITY + unique_lock(unique_lock&& __u) _NOEXCEPT + : __m_(__u.__m_), __owns_(__u.__owns_) + {__u.__m_ = nullptr; __u.__owns_ = false;} + _LIBCUDACXX_INLINE_VISIBILITY + unique_lock& operator=(unique_lock&& __u) _NOEXCEPT + { + if (__owns_) + __m_->unlock(); + __m_ = __u.__m_; + __owns_ = __u.__owns_; + __u.__m_ = nullptr; + __u.__owns_ = false; + return *this; + } + +#endif // _LIBCUDACXX_CXX03_LANG + + void lock(); + bool try_lock(); + + template + bool try_lock_for(const chrono::duration<_Rep, _Period>& __d); + template + bool try_lock_until(const chrono::time_point<_Clock, _Duration>& __t); + + void unlock(); + + _LIBCUDACXX_INLINE_VISIBILITY + void swap(unique_lock& __u) _NOEXCEPT + { + _CUDA_VSTD::swap(__m_, __u.__m_); + _CUDA_VSTD::swap(__owns_, __u.__owns_); + } + _LIBCUDACXX_INLINE_VISIBILITY + mutex_type* release() _NOEXCEPT + { + mutex_type* __m = __m_; + __m_ = nullptr; + __owns_ = false; + return __m; + } + + _LIBCUDACXX_INLINE_VISIBILITY + bool owns_lock() const _NOEXCEPT {return __owns_;} + _LIBCUDACXX_INLINE_VISIBILITY + _LIBCUDACXX_EXPLICIT + operator bool () const _NOEXCEPT {return __owns_;} + _LIBCUDACXX_INLINE_VISIBILITY + mutex_type* mutex() const _NOEXCEPT {return __m_;} +}; + +template +void +unique_lock<_Mutex>::lock() +{ + if (__m_ == nullptr) + __throw_system_error(EPERM, "unique_lock::lock: references null mutex"); + if (__owns_) + __throw_system_error(EDEADLK, "unique_lock::lock: already locked"); + __m_->lock(); + __owns_ = true; +} + +template +bool +unique_lock<_Mutex>::try_lock() +{ + if (__m_ == nullptr) + __throw_system_error(EPERM, "unique_lock::try_lock: references null mutex"); + if (__owns_) + __throw_system_error(EDEADLK, "unique_lock::try_lock: already locked"); + __owns_ = __m_->try_lock(); + return __owns_; +} + +template +template +bool 
+unique_lock<_Mutex>::try_lock_for(const chrono::duration<_Rep, _Period>& __d) +{ + if (__m_ == nullptr) + __throw_system_error(EPERM, "unique_lock::try_lock_for: references null mutex"); + if (__owns_) + __throw_system_error(EDEADLK, "unique_lock::try_lock_for: already locked"); + __owns_ = __m_->try_lock_for(__d); + return __owns_; +} + +template +template +bool +unique_lock<_Mutex>::try_lock_until(const chrono::time_point<_Clock, _Duration>& __t) +{ + if (__m_ == nullptr) + __throw_system_error(EPERM, "unique_lock::try_lock_until: references null mutex"); + if (__owns_) + __throw_system_error(EDEADLK, "unique_lock::try_lock_until: already locked"); + __owns_ = __m_->try_lock_until(__t); + return __owns_; +} + +template +void +unique_lock<_Mutex>::unlock() +{ + if (!__owns_) + __throw_system_error(EPERM, "unique_lock::unlock: not locked"); + __m_->unlock(); + __owns_ = false; +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) _NOEXCEPT + {__x.swap(__y);} + +//enum class cv_status +_LIBCUDACXX_DECLARE_STRONG_ENUM(cv_status) +{ + no_timeout, + timeout +}; +_LIBCUDACXX_DECLARE_STRONG_ENUM_EPILOG(cv_status) + +class _LIBCUDACXX_TYPE_VIS condition_variable +{ + __libcpp_condvar_t __cv_ = _LIBCUDACXX_CONDVAR_INITIALIZER; +public: + _LIBCUDACXX_INLINE_VISIBILITY + _LIBCUDACXX_CONSTEXPR condition_variable() _NOEXCEPT = default; + +#ifdef _LIBCUDACXX_HAS_TRIVIAL_CONDVAR_DESTRUCTION + ~condition_variable() = default; +#else + ~condition_variable(); +#endif + + condition_variable(const condition_variable&) = delete; + condition_variable& operator=(const condition_variable&) = delete; + + void notify_one() _NOEXCEPT; + void notify_all() _NOEXCEPT; + + void wait(unique_lock& __lk) _NOEXCEPT; + template + _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS + void wait(unique_lock& __lk, _Predicate __pred); + + template + _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS + cv_status + wait_until(unique_lock& __lk, + const chrono::time_point<_Clock, _Duration>& __t); + + template + _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS + bool + wait_until(unique_lock& __lk, + const chrono::time_point<_Clock, _Duration>& __t, + _Predicate __pred); + + template + _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS + cv_status + wait_for(unique_lock& __lk, + const chrono::duration<_Rep, _Period>& __d); + + template + bool + _LIBCUDACXX_INLINE_VISIBILITY + wait_for(unique_lock& __lk, + const chrono::duration<_Rep, _Period>& __d, + _Predicate __pred); + + typedef __libcpp_condvar_t* native_handle_type; + _LIBCUDACXX_INLINE_VISIBILITY native_handle_type native_handle() {return &__cv_;} + +private: + void __do_timed_wait(unique_lock& __lk, + chrono::time_point) _NOEXCEPT; +#if defined(_LIBCUDACXX_HAS_COND_CLOCKWAIT) + void __do_timed_wait(unique_lock& __lk, + chrono::time_point) _NOEXCEPT; +#endif + template + void __do_timed_wait(unique_lock& __lk, + chrono::time_point<_Clock, chrono::nanoseconds>) _NOEXCEPT; +}; +#endif // !_LIBCUDACXX_HAS_NO_THREADS + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename enable_if +< + is_floating_point<_Rep>::value, + chrono::nanoseconds +>::type +__safe_nanosecond_cast(chrono::duration<_Rep, _Period> __d) +{ + using namespace chrono; + using __ratio = ratio_divide<_Period, nano>; + using __ns_rep = nanoseconds::rep; + _Rep __result_float = __d.count() * __ratio::num / __ratio::den; + + _Rep __result_max = numeric_limits<__ns_rep>::max(); + if (__result_float >= __result_max) { + return 
nanoseconds::max(); + } + + _Rep __result_min = numeric_limits<__ns_rep>::min(); + if (__result_float <= __result_min) { + return nanoseconds::min(); + } + + return nanoseconds(static_cast<__ns_rep>(__result_float)); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +typename enable_if +< + !is_floating_point<_Rep>::value, + chrono::nanoseconds +>::type +__safe_nanosecond_cast(chrono::duration<_Rep, _Period> __d) +{ + using namespace chrono; + if (__d.count() == 0) { + return nanoseconds(0); + } + + using __ratio = ratio_divide<_Period, nano>; + using __ns_rep = nanoseconds::rep; + __ns_rep __result_max = std::numeric_limits<__ns_rep>::max(); + if (__d.count() > 0 && __d.count() > __result_max / __ratio::num) { + return nanoseconds::max(); + } + + __ns_rep __result_min = std::numeric_limits<__ns_rep>::min(); + if (__d.count() < 0 && __d.count() < __result_min / __ratio::num) { + return nanoseconds::min(); + } + + __ns_rep __result = __d.count() * __ratio::num / __ratio::den; + if (__result == 0) { + return nanoseconds(1); + } + + return nanoseconds(__result); +} + +#ifndef _LIBCUDACXX_HAS_NO_THREADS +template +void +condition_variable::wait(unique_lock& __lk, _Predicate __pred) +{ + while (!__pred()) + wait(__lk); +} + +template +cv_status +condition_variable::wait_until(unique_lock& __lk, + const chrono::time_point<_Clock, _Duration>& __t) +{ + using namespace chrono; + using __clock_tp_ns = time_point<_Clock, nanoseconds>; + + typename _Clock::time_point __now = _Clock::now(); + if (__t <= __now) + return cv_status::timeout; + + __clock_tp_ns __t_ns = __clock_tp_ns(__safe_nanosecond_cast(__t.time_since_epoch())); + + __do_timed_wait(__lk, __t_ns); + return _Clock::now() < __t ? cv_status::no_timeout : cv_status::timeout; +} + +template +bool +condition_variable::wait_until(unique_lock& __lk, + const chrono::time_point<_Clock, _Duration>& __t, + _Predicate __pred) +{ + while (!__pred()) + { + if (wait_until(__lk, __t) == cv_status::timeout) + return __pred(); + } + return true; +} + +template +cv_status +condition_variable::wait_for(unique_lock& __lk, + const chrono::duration<_Rep, _Period>& __d) +{ + using namespace chrono; + if (__d <= __d.zero()) + return cv_status::timeout; + using __ns_rep = nanoseconds::rep; + steady_clock::time_point __c_now = steady_clock::now(); + +#if defined(_LIBCUDACXX_HAS_COND_CLOCKWAIT) + using __clock_tp_ns = time_point; + __ns_rep __now_count_ns = __safe_nanosecond_cast(__c_now.time_since_epoch()).count(); +#else + using __clock_tp_ns = time_point; + __ns_rep __now_count_ns = __safe_nanosecond_cast(system_clock::now().time_since_epoch()).count(); +#endif + + __ns_rep __d_ns_count = __safe_nanosecond_cast(__d).count(); + + if (__now_count_ns > numeric_limits<__ns_rep>::max() - __d_ns_count) { + __do_timed_wait(__lk, __clock_tp_ns::max()); + } else { + __do_timed_wait(__lk, __clock_tp_ns(nanoseconds(__now_count_ns + __d_ns_count))); + } + + return steady_clock::now() - __c_now < __d ? 
cv_status::no_timeout : + cv_status::timeout; +} + +template +inline +bool +condition_variable::wait_for(unique_lock& __lk, + const chrono::duration<_Rep, _Period>& __d, + _Predicate __pred) +{ + return wait_until(__lk, chrono::steady_clock::now() + __d, + _CUDA_VSTD::move(__pred)); +} + +#if defined(_LIBCUDACXX_HAS_COND_CLOCKWAIT) +inline +void +condition_variable::__do_timed_wait(unique_lock& __lk, + chrono::time_point __tp) _NOEXCEPT +{ + using namespace chrono; + if (!__lk.owns_lock()) + __throw_system_error(EPERM, + "condition_variable::timed wait: mutex not locked"); + nanoseconds __d = __tp.time_since_epoch(); + timespec __ts; + seconds __s = duration_cast(__d); + using __ts_sec = decltype(__ts.tv_sec); + const __ts_sec __ts_sec_max = numeric_limits<__ts_sec>::max(); + if (__s.count() < __ts_sec_max) + { + __ts.tv_sec = static_cast<__ts_sec>(__s.count()); + __ts.tv_nsec = (__d - __s).count(); + } + else + { + __ts.tv_sec = __ts_sec_max; + __ts.tv_nsec = giga::num - 1; + } + int __ec = pthread_cond_clockwait(&__cv_, __lk.mutex()->native_handle(), CLOCK_MONOTONIC, &__ts); + if (__ec != 0 && __ec != ETIMEDOUT) + __throw_system_error(__ec, "condition_variable timed_wait failed"); +} +#endif // _LIBCUDACXX_HAS_COND_CLOCKWAIT + +template +inline +void +condition_variable::__do_timed_wait(unique_lock& __lk, + chrono::time_point<_Clock, chrono::nanoseconds> __tp) _NOEXCEPT +{ + wait_for(__lk, __tp - _Clock::now()); +} + +#endif // !_LIBCUDACXX_HAS_NO_THREADS + +_LIBCUDACXX_END_NAMESPACE_STD + +_LIBCUDACXX_POP_MACROS + +#endif // _LIBCUDACXX___MUTEX_BASE diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__node_handle b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__node_handle new file mode 100644 index 000000000000..7a6e61af049b --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__node_handle @@ -0,0 +1,208 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___NODE_HANDLE +#define _LIBCUDACXX___NODE_HANDLE + +#include <__config> +#include +#include + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_PUSH_MACROS +#include <__undef_macros> + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +#if _LIBCUDACXX_STD_VER > 14 + +// Specialized in __tree & __hash_table for their _NodeType. 
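The predicate overloads of wait_for and wait_until above loop until the predicate holds, which makes spurious wakeups harmless, and every timed wait ultimately funnels into a single wait_until on a saturating nanosecond time point. A minimal sketch of the intended call pattern, using the standard std::condition_variable equivalents (the function name is illustrative):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex m;
    std::condition_variable cv;
    bool ready = false;  // protected by m

    bool wait_ready_for_one_second() {
        std::unique_lock<std::mutex> lk(m);
        // Re-checks the predicate after every wakeup; on timeout it returns
        // the predicate's final value, exactly as in the overload above.
        return cv.wait_for(lk, std::chrono::seconds(1), [] { return ready; });
    }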
+template +struct __generic_container_node_destructor; + +template class _MapOrSetSpecifics> +class _LIBCUDACXX_TEMPLATE_VIS __basic_node_handle + : public _MapOrSetSpecifics< + _NodeType, + __basic_node_handle<_NodeType, _Alloc, _MapOrSetSpecifics>> +{ + template + friend class __tree; + template + friend class __hash_table; + friend struct _MapOrSetSpecifics< + _NodeType, __basic_node_handle<_NodeType, _Alloc, _MapOrSetSpecifics>>; + + typedef allocator_traits<_Alloc> __alloc_traits; + typedef typename __rebind_pointer::type + __node_pointer_type; + +public: + typedef _Alloc allocator_type; + +private: + __node_pointer_type __ptr_ = nullptr; + optional __alloc_; + + _LIBCUDACXX_INLINE_VISIBILITY + void __release_ptr() + { + __ptr_ = nullptr; + __alloc_ = _CUDA_VSTD::nullopt; + } + + _LIBCUDACXX_INLINE_VISIBILITY + void __destroy_node_pointer() + { + if (__ptr_ != nullptr) + { + typedef typename __allocator_traits_rebind< + allocator_type, _NodeType>::type __node_alloc_type; + __node_alloc_type __alloc(*__alloc_); + __generic_container_node_destructor<_NodeType, __node_alloc_type>( + __alloc, true)(__ptr_); + __ptr_ = nullptr; + } + } + + _LIBCUDACXX_INLINE_VISIBILITY + __basic_node_handle(__node_pointer_type __ptr, + allocator_type const& __alloc) + : __ptr_(__ptr), __alloc_(__alloc) + { + } + +public: + _LIBCUDACXX_INLINE_VISIBILITY + __basic_node_handle() = default; + + _LIBCUDACXX_INLINE_VISIBILITY + __basic_node_handle(__basic_node_handle&& __other) noexcept + : __ptr_(__other.__ptr_), + __alloc_(_CUDA_VSTD::move(__other.__alloc_)) + { + __other.__ptr_ = nullptr; + __other.__alloc_ = _CUDA_VSTD::nullopt; + } + + _LIBCUDACXX_INLINE_VISIBILITY + __basic_node_handle& operator=(__basic_node_handle&& __other) + { + _LIBCUDACXX_ASSERT( + __alloc_ == _CUDA_VSTD::nullopt || + __alloc_traits::propagate_on_container_move_assignment::value || + __alloc_ == __other.__alloc_, + "node_type with incompatible allocator passed to " + "node_type::operator=(node_type&&)"); + + __destroy_node_pointer(); + __ptr_ = __other.__ptr_; + + if (__alloc_traits::propagate_on_container_move_assignment::value || + __alloc_ == _CUDA_VSTD::nullopt) + __alloc_ = _CUDA_VSTD::move(__other.__alloc_); + + __other.__ptr_ = nullptr; + __other.__alloc_ = _CUDA_VSTD::nullopt; + + return *this; + } + + _LIBCUDACXX_INLINE_VISIBILITY + allocator_type get_allocator() const { return *__alloc_; } + + _LIBCUDACXX_INLINE_VISIBILITY + explicit operator bool() const { return __ptr_ != nullptr; } + + _LIBCUDACXX_NODISCARD_AFTER_CXX17 _LIBCUDACXX_INLINE_VISIBILITY + bool empty() const { return __ptr_ == nullptr; } + + _LIBCUDACXX_INLINE_VISIBILITY + void swap(__basic_node_handle& __other) noexcept( + __alloc_traits::propagate_on_container_swap::value || + __alloc_traits::is_always_equal::value) + { + using _CUDA_VSTD::swap; + swap(__ptr_, __other.__ptr_); + if (__alloc_traits::propagate_on_container_swap::value || + __alloc_ == _CUDA_VSTD::nullopt || __other.__alloc_ == _CUDA_VSTD::nullopt) + swap(__alloc_, __other.__alloc_); + } + + _LIBCUDACXX_INLINE_VISIBILITY + friend void swap(__basic_node_handle& __a, __basic_node_handle& __b) + noexcept(noexcept(__a.swap(__b))) { __a.swap(__b); } + + _LIBCUDACXX_INLINE_VISIBILITY + ~__basic_node_handle() + { + __destroy_node_pointer(); + } +}; + +template +struct __set_node_handle_specifics +{ + typedef typename _NodeType::__node_value_type value_type; + + _LIBCUDACXX_INLINE_VISIBILITY + value_type& value() const + { + return static_cast<_Derived const*>(this)->__ptr_->__value_; + } +}; + 
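The __basic_node_handle machinery above is what backs the C++17 container node handles: extract() detaches a node and hands back ownership of it (the node pointer plus a copy of the allocator), and insert() splices it into a container without copying the element. The set flavour exposes value(); the map flavour, defined just below, exposes key() and mapped(). A short usage sketch with the standard containers:

    #include <cassert>
    #include <map>

    int main() {
        std::map<int, const char*> m{{1, "one"}, {2, "two"}};
        auto nh = m.extract(1);   // node handle; the map no longer owns the node
        nh.key() = 10;            // key() is mutable through the handle
        m.insert(std::move(nh));  // splice back in, no element copy
        assert(m.count(10) == 1);
        return 0;
    }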
+template +struct __map_node_handle_specifics +{ + typedef typename _NodeType::__node_value_type::key_type key_type; + typedef typename _NodeType::__node_value_type::mapped_type mapped_type; + + _LIBCUDACXX_INLINE_VISIBILITY + key_type& key() const + { + return static_cast<_Derived const*>(this)-> + __ptr_->__value_.__ref().first; + } + + _LIBCUDACXX_INLINE_VISIBILITY + mapped_type& mapped() const + { + return static_cast<_Derived const*>(this)-> + __ptr_->__value_.__ref().second; + } +}; + +template +using __set_node_handle = + __basic_node_handle< _NodeType, _Alloc, __set_node_handle_specifics>; + +template +using __map_node_handle = + __basic_node_handle< _NodeType, _Alloc, __map_node_handle_specifics>; + +template +struct _LIBCUDACXX_TEMPLATE_VIS __insert_return_type +{ + _Iterator position; + bool inserted; + _NodeType node; +}; + +#endif // _LIBCUDACXX_STD_VER > 14 + +_LIBCUDACXX_END_NAMESPACE_STD +_LIBCUDACXX_POP_MACROS + +#endif diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__nullptr b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__nullptr new file mode 100644 index 000000000000..bf3c3c385ac8 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__nullptr @@ -0,0 +1,61 @@ +// -*- C++ -*- +//===--------------------------- __nullptr --------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX_NULLPTR +#define _LIBCUDACXX_NULLPTR + +#include <__config> + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +#ifdef _LIBCUDACXX_HAS_NO_NULLPTR + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +struct _LIBCUDACXX_TEMPLATE_VIS nullptr_t +{ + void* __lx; + + struct __nat {int __for_bool_;}; + + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR nullptr_t() : __lx(0) {} + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR nullptr_t(int __nat::*) : __lx(0) {} + + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR operator int __nat::*() const {return 0;} + + template + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR + operator _Tp* () const {return 0;} + + template + _LIBCUDACXX_INLINE_VISIBILITY + operator _Tp _Up::* () const {return 0;} + + friend _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR bool operator==(nullptr_t, nullptr_t) {return true;} + friend _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR bool operator!=(nullptr_t, nullptr_t) {return false;} +}; + +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR nullptr_t __get_nullptr_t() {return nullptr_t(0);} + +#define nullptr _CUDA_VSTD::__get_nullptr_t() + +_LIBCUDACXX_END_NAMESPACE_STD + +#else // _LIBCUDACXX_HAS_NO_NULLPTR + +namespace std +{ + typedef decltype(nullptr) nullptr_t; +} + +#endif // _LIBCUDACXX_HAS_NO_NULLPTR + +#endif // _LIBCUDACXX_NULLPTR diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_pop b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_pop new file mode 100644 index 000000000000..27a9a68b4e60 --- /dev/null +++ 
b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_pop @@ -0,0 +1,16 @@ +// -*- C++ -*- +//===---------------------------- chrono ----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#if defined(_LIBCUDACXX_USE_PRAGMA_MSVC_WARNING) + #pragma warning(pop) +#endif + +#if defined(_LIBCUDACXX_POP_MACROS) + _LIBCUDACXX_POP_MACROS +#endif diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_push b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_push new file mode 100644 index 000000000000..ad34a38d9364 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_push @@ -0,0 +1,25 @@ +// -*- C++ -*- +//===---------------------------- chrono ----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) + #pragma GCC system_header +#endif + +#if defined(_LIBCUDACXX_USE_PRAGMA_MSVC_WARNING) + #pragma warning(push) + #pragma warning(disable : _LIBCUDACXX_MSVC_DISABLED_WARNINGS) +#endif + +#if defined(_LIBCUDACXX_PUSH_MACROS) + _LIBCUDACXX_PUSH_MACROS +#endif + +#ifndef __cuda_std__ +#include <__undef_macros> +#endif diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__split_buffer b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__split_buffer new file mode 100644 index 000000000000..d303f9478fbb --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__split_buffer @@ -0,0 +1,644 @@ +// -*- C++ -*- +#ifndef _LIBCUDACXX_SPLIT_BUFFER +#define _LIBCUDACXX_SPLIT_BUFFER + +#include <__config> +#include +#include + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_PUSH_MACROS +#include <__undef_macros> + + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +template +class __split_buffer_common +{ +protected: + void __throw_length_error() const; + void __throw_out_of_range() const; +}; + +template > +struct __split_buffer + : private __split_buffer_common +{ +private: + __split_buffer(const __split_buffer&); + __split_buffer& operator=(const __split_buffer&); +public: + typedef _Tp value_type; + typedef _Allocator allocator_type; + typedef typename remove_reference::type __alloc_rr; + typedef allocator_traits<__alloc_rr> __alloc_traits; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef typename __alloc_traits::size_type size_type; + typedef typename __alloc_traits::difference_type difference_type; + typedef typename __alloc_traits::pointer pointer; + typedef typename __alloc_traits::const_pointer const_pointer; + typedef pointer iterator; + typedef const_pointer const_iterator; + + pointer 
__first_; + pointer __begin_; + pointer __end_; + __compressed_pair __end_cap_; + + typedef typename add_lvalue_reference::type __alloc_ref; + typedef typename add_lvalue_reference::type __alloc_const_ref; + + _LIBCUDACXX_INLINE_VISIBILITY __alloc_rr& __alloc() _NOEXCEPT {return __end_cap_.second();} + _LIBCUDACXX_INLINE_VISIBILITY const __alloc_rr& __alloc() const _NOEXCEPT {return __end_cap_.second();} + _LIBCUDACXX_INLINE_VISIBILITY pointer& __end_cap() _NOEXCEPT {return __end_cap_.first();} + _LIBCUDACXX_INLINE_VISIBILITY const pointer& __end_cap() const _NOEXCEPT {return __end_cap_.first();} + + _LIBCUDACXX_INLINE_VISIBILITY + __split_buffer() + _NOEXCEPT_(is_nothrow_default_constructible::value); + _LIBCUDACXX_INLINE_VISIBILITY + explicit __split_buffer(__alloc_rr& __a); + _LIBCUDACXX_INLINE_VISIBILITY + explicit __split_buffer(const __alloc_rr& __a); + __split_buffer(size_type __cap, size_type __start, __alloc_rr& __a); + ~__split_buffer(); + +#ifndef _LIBCUDACXX_CXX03_LANG + __split_buffer(__split_buffer&& __c) + _NOEXCEPT_(is_nothrow_move_constructible::value); + __split_buffer(__split_buffer&& __c, const __alloc_rr& __a); + __split_buffer& operator=(__split_buffer&& __c) + _NOEXCEPT_((__alloc_traits::propagate_on_container_move_assignment::value && + is_nothrow_move_assignable::value) || + !__alloc_traits::propagate_on_container_move_assignment::value); +#endif // _LIBCUDACXX_CXX03_LANG + + _LIBCUDACXX_INLINE_VISIBILITY iterator begin() _NOEXCEPT {return __begin_;} + _LIBCUDACXX_INLINE_VISIBILITY const_iterator begin() const _NOEXCEPT {return __begin_;} + _LIBCUDACXX_INLINE_VISIBILITY iterator end() _NOEXCEPT {return __end_;} + _LIBCUDACXX_INLINE_VISIBILITY const_iterator end() const _NOEXCEPT {return __end_;} + + _LIBCUDACXX_INLINE_VISIBILITY + void clear() _NOEXCEPT + {__destruct_at_end(__begin_);} + _LIBCUDACXX_INLINE_VISIBILITY size_type size() const {return static_cast(__end_ - __begin_);} + _LIBCUDACXX_INLINE_VISIBILITY bool empty() const {return __end_ == __begin_;} + _LIBCUDACXX_INLINE_VISIBILITY size_type capacity() const {return static_cast(__end_cap() - __first_);} + _LIBCUDACXX_INLINE_VISIBILITY size_type __front_spare() const {return static_cast(__begin_ - __first_);} + _LIBCUDACXX_INLINE_VISIBILITY size_type __back_spare() const {return static_cast(__end_cap() - __end_);} + + _LIBCUDACXX_INLINE_VISIBILITY reference front() {return *__begin_;} + _LIBCUDACXX_INLINE_VISIBILITY const_reference front() const {return *__begin_;} + _LIBCUDACXX_INLINE_VISIBILITY reference back() {return *(__end_ - 1);} + _LIBCUDACXX_INLINE_VISIBILITY const_reference back() const {return *(__end_ - 1);} + + void reserve(size_type __n); + void shrink_to_fit() _NOEXCEPT; + void push_front(const_reference __x); + _LIBCUDACXX_INLINE_VISIBILITY void push_back(const_reference __x); +#ifndef _LIBCUDACXX_CXX03_LANG + void push_front(value_type&& __x); + void push_back(value_type&& __x); + template + void emplace_back(_Args&&... 
__args); +#endif // !defined(_LIBCUDACXX_CXX03_LANG) + + _LIBCUDACXX_INLINE_VISIBILITY void pop_front() {__destruct_at_begin(__begin_+1);} + _LIBCUDACXX_INLINE_VISIBILITY void pop_back() {__destruct_at_end(__end_-1);} + + void __construct_at_end(size_type __n); + void __construct_at_end(size_type __n, const_reference __x); + template + typename enable_if + < + __is_input_iterator<_InputIter>::value && + !__is_forward_iterator<_InputIter>::value, + void + >::type + __construct_at_end(_InputIter __first, _InputIter __last); + template + typename enable_if + < + __is_forward_iterator<_ForwardIterator>::value, + void + >::type + __construct_at_end(_ForwardIterator __first, _ForwardIterator __last); + + _LIBCUDACXX_INLINE_VISIBILITY void __destruct_at_begin(pointer __new_begin) + {__destruct_at_begin(__new_begin, is_trivially_destructible());} + _LIBCUDACXX_INLINE_VISIBILITY + void __destruct_at_begin(pointer __new_begin, false_type); + _LIBCUDACXX_INLINE_VISIBILITY + void __destruct_at_begin(pointer __new_begin, true_type); + + _LIBCUDACXX_INLINE_VISIBILITY + void __destruct_at_end(pointer __new_last) _NOEXCEPT + {__destruct_at_end(__new_last, false_type());} + _LIBCUDACXX_INLINE_VISIBILITY + void __destruct_at_end(pointer __new_last, false_type) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + void __destruct_at_end(pointer __new_last, true_type) _NOEXCEPT; + + void swap(__split_buffer& __x) + _NOEXCEPT_(!__alloc_traits::propagate_on_container_swap::value|| + __is_nothrow_swappable<__alloc_rr>::value); + + bool __invariants() const; + +private: + _LIBCUDACXX_INLINE_VISIBILITY + void __move_assign_alloc(__split_buffer& __c, true_type) + _NOEXCEPT_(is_nothrow_move_assignable::value) + { + __alloc() = _CUDA_VSTD::move(__c.__alloc()); + } + + _LIBCUDACXX_INLINE_VISIBILITY + void __move_assign_alloc(__split_buffer&, false_type) _NOEXCEPT + {} + + struct _ConstructTransaction { + explicit _ConstructTransaction(pointer* __p, size_type __n) _NOEXCEPT + : __pos_(*__p), __end_(*__p + __n), __dest_(__p) { + } + ~_ConstructTransaction() { + *__dest_ = __pos_; + } + pointer __pos_; + const pointer __end_; + private: + pointer *__dest_; + }; +}; + +template +bool +__split_buffer<_Tp, _Allocator>::__invariants() const +{ + if (__first_ == nullptr) + { + if (__begin_ != nullptr) + return false; + if (__end_ != nullptr) + return false; + if (__end_cap() != nullptr) + return false; + } + else + { + if (__begin_ < __first_) + return false; + if (__end_ < __begin_) + return false; + if (__end_cap() < __end_) + return false; + } + return true; +} + +// Default constructs __n objects starting at __end_ +// throws if construction throws +// Precondition: __n > 0 +// Precondition: size() + __n <= capacity() +// Postcondition: size() == size() + __n +template +void +__split_buffer<_Tp, _Allocator>::__construct_at_end(size_type __n) +{ + _ConstructTransaction __tx(&this->__end_, __n); + for (; __tx.__pos_ != __tx.__end_; ++__tx.__pos_) { + __alloc_traits::construct(this->__alloc(), _CUDA_VSTD::__to_raw_pointer(__tx.__pos_)); + } +} + +// Copy constructs __n objects starting at __end_ from __x +// throws if construction throws +// Precondition: __n > 0 +// Precondition: size() + __n <= capacity() +// Postcondition: size() == old size() + __n +// Postcondition: [i] == __x for all i in [size() - __n, __n) +template +void +__split_buffer<_Tp, _Allocator>::__construct_at_end(size_type __n, const_reference __x) +{ + _ConstructTransaction __tx(&this->__end_, __n); + for (; __tx.__pos_ != __tx.__end_; ++__tx.__pos_) { + 
__alloc_traits::construct(this->__alloc(), + _CUDA_VSTD::__to_raw_pointer(__tx.__pos_), __x); + } +} + +template +template +typename enable_if +< + __is_input_iterator<_InputIter>::value && + !__is_forward_iterator<_InputIter>::value, + void +>::type +__split_buffer<_Tp, _Allocator>::__construct_at_end(_InputIter __first, _InputIter __last) +{ + __alloc_rr& __a = this->__alloc(); + for (; __first != __last; ++__first) + { + if (__end_ == __end_cap()) + { + size_type __old_cap = __end_cap() - __first_; + size_type __new_cap = _CUDA_VSTD::max(2 * __old_cap, 8); + __split_buffer __buf(__new_cap, 0, __a); + for (pointer __p = __begin_; __p != __end_; ++__p, ++__buf.__end_) + __alloc_traits::construct(__buf.__alloc(), + _CUDA_VSTD::__to_raw_pointer(__buf.__end_), _CUDA_VSTD::move(*__p)); + swap(__buf); + } + __alloc_traits::construct(__a, _CUDA_VSTD::__to_raw_pointer(this->__end_), *__first); + ++this->__end_; + } +} + +template +template +typename enable_if +< + __is_forward_iterator<_ForwardIterator>::value, + void +>::type +__split_buffer<_Tp, _Allocator>::__construct_at_end(_ForwardIterator __first, _ForwardIterator __last) +{ + _ConstructTransaction __tx(&this->__end_, std::distance(__first, __last)); + for (; __tx.__pos_ != __tx.__end_; ++__tx.__pos_, ++__first) { + __alloc_traits::construct(this->__alloc(), + _CUDA_VSTD::__to_raw_pointer(__tx.__pos_), *__first); + } +} + +template +inline +void +__split_buffer<_Tp, _Allocator>::__destruct_at_begin(pointer __new_begin, false_type) +{ + while (__begin_ != __new_begin) + __alloc_traits::destroy(__alloc(), __to_raw_pointer(__begin_++)); +} + +template +inline +void +__split_buffer<_Tp, _Allocator>::__destruct_at_begin(pointer __new_begin, true_type) +{ + __begin_ = __new_begin; +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +__split_buffer<_Tp, _Allocator>::__destruct_at_end(pointer __new_last, false_type) _NOEXCEPT +{ + while (__new_last != __end_) + __alloc_traits::destroy(__alloc(), __to_raw_pointer(--__end_)); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +__split_buffer<_Tp, _Allocator>::__destruct_at_end(pointer __new_last, true_type) _NOEXCEPT +{ + __end_ = __new_last; +} + +template +__split_buffer<_Tp, _Allocator>::__split_buffer(size_type __cap, size_type __start, __alloc_rr& __a) + : __end_cap_(nullptr, __a) +{ + __first_ = __cap != 0 ? 
__alloc_traits::allocate(__alloc(), __cap) : nullptr; + __begin_ = __end_ = __first_ + __start; + __end_cap() = __first_ + __cap; +} + +template +inline +__split_buffer<_Tp, _Allocator>::__split_buffer() + _NOEXCEPT_(is_nothrow_default_constructible::value) + : __first_(nullptr), __begin_(nullptr), __end_(nullptr), __end_cap_(nullptr) +{ +} + +template +inline +__split_buffer<_Tp, _Allocator>::__split_buffer(__alloc_rr& __a) + : __first_(nullptr), __begin_(nullptr), __end_(nullptr), __end_cap_(nullptr, __a) +{ +} + +template +inline +__split_buffer<_Tp, _Allocator>::__split_buffer(const __alloc_rr& __a) + : __first_(nullptr), __begin_(nullptr), __end_(nullptr), __end_cap_(nullptr, __a) +{ +} + +template +__split_buffer<_Tp, _Allocator>::~__split_buffer() +{ + clear(); + if (__first_) + __alloc_traits::deallocate(__alloc(), __first_, capacity()); +} + +#ifndef _LIBCUDACXX_CXX03_LANG + +template +__split_buffer<_Tp, _Allocator>::__split_buffer(__split_buffer&& __c) + _NOEXCEPT_(is_nothrow_move_constructible::value) + : __first_(_CUDA_VSTD::move(__c.__first_)), + __begin_(_CUDA_VSTD::move(__c.__begin_)), + __end_(_CUDA_VSTD::move(__c.__end_)), + __end_cap_(_CUDA_VSTD::move(__c.__end_cap_)) +{ + __c.__first_ = nullptr; + __c.__begin_ = nullptr; + __c.__end_ = nullptr; + __c.__end_cap() = nullptr; +} + +template +__split_buffer<_Tp, _Allocator>::__split_buffer(__split_buffer&& __c, const __alloc_rr& __a) + : __end_cap_(__second_tag(), __a) +{ + if (__a == __c.__alloc()) + { + __first_ = __c.__first_; + __begin_ = __c.__begin_; + __end_ = __c.__end_; + __end_cap() = __c.__end_cap(); + __c.__first_ = nullptr; + __c.__begin_ = nullptr; + __c.__end_ = nullptr; + __c.__end_cap() = nullptr; + } + else + { + size_type __cap = __c.size(); + __first_ = __alloc_traits::allocate(__alloc(), __cap); + __begin_ = __end_ = __first_; + __end_cap() = __first_ + __cap; + typedef move_iterator _Ip; + __construct_at_end(_Ip(__c.begin()), _Ip(__c.end())); + } +} + +template +__split_buffer<_Tp, _Allocator>& +__split_buffer<_Tp, _Allocator>::operator=(__split_buffer&& __c) + _NOEXCEPT_((__alloc_traits::propagate_on_container_move_assignment::value && + is_nothrow_move_assignable::value) || + !__alloc_traits::propagate_on_container_move_assignment::value) +{ + clear(); + shrink_to_fit(); + __first_ = __c.__first_; + __begin_ = __c.__begin_; + __end_ = __c.__end_; + __end_cap() = __c.__end_cap(); + __move_assign_alloc(__c, + integral_constant()); + __c.__first_ = __c.__begin_ = __c.__end_ = __c.__end_cap() = nullptr; + return *this; +} + +#endif // _LIBCUDACXX_CXX03_LANG + +template +void +__split_buffer<_Tp, _Allocator>::swap(__split_buffer& __x) + _NOEXCEPT_(!__alloc_traits::propagate_on_container_swap::value|| + __is_nothrow_swappable<__alloc_rr>::value) +{ + _CUDA_VSTD::swap(__first_, __x.__first_); + _CUDA_VSTD::swap(__begin_, __x.__begin_); + _CUDA_VSTD::swap(__end_, __x.__end_); + _CUDA_VSTD::swap(__end_cap(), __x.__end_cap()); + __swap_allocator(__alloc(), __x.__alloc()); +} + +template +void +__split_buffer<_Tp, _Allocator>::reserve(size_type __n) +{ + if (__n < capacity()) + { + __split_buffer __t(__n, 0, __alloc()); + __t.__construct_at_end(move_iterator(__begin_), + move_iterator(__end_)); + _CUDA_VSTD::swap(__first_, __t.__first_); + _CUDA_VSTD::swap(__begin_, __t.__begin_); + _CUDA_VSTD::swap(__end_, __t.__end_); + _CUDA_VSTD::swap(__end_cap(), __t.__end_cap()); + } +} + +template +void +__split_buffer<_Tp, _Allocator>::shrink_to_fit() _NOEXCEPT +{ + if (capacity() > size()) + { +#ifndef 
_LIBCUDACXX_NO_EXCEPTIONS + try + { +#endif // _LIBCUDACXX_NO_EXCEPTIONS + __split_buffer __t(size(), 0, __alloc()); + __t.__construct_at_end(move_iterator(__begin_), + move_iterator(__end_)); + __t.__end_ = __t.__begin_ + (__end_ - __begin_); + _CUDA_VSTD::swap(__first_, __t.__first_); + _CUDA_VSTD::swap(__begin_, __t.__begin_); + _CUDA_VSTD::swap(__end_, __t.__end_); + _CUDA_VSTD::swap(__end_cap(), __t.__end_cap()); +#ifndef _LIBCUDACXX_NO_EXCEPTIONS + } + catch (...) + { + } +#endif // _LIBCUDACXX_NO_EXCEPTIONS + } +} + +template +void +__split_buffer<_Tp, _Allocator>::push_front(const_reference __x) +{ + if (__begin_ == __first_) + { + if (__end_ < __end_cap()) + { + difference_type __d = __end_cap() - __end_; + __d = (__d + 1) / 2; + __begin_ = _CUDA_VSTD::move_backward(__begin_, __end_, __end_ + __d); + __end_ += __d; + } + else + { + size_type __c = max(2 * static_cast(__end_cap() - __first_), 1); + __split_buffer __t(__c, (__c + 3) / 4, __alloc()); + __t.__construct_at_end(move_iterator(__begin_), + move_iterator(__end_)); + _CUDA_VSTD::swap(__first_, __t.__first_); + _CUDA_VSTD::swap(__begin_, __t.__begin_); + _CUDA_VSTD::swap(__end_, __t.__end_); + _CUDA_VSTD::swap(__end_cap(), __t.__end_cap()); + } + } + __alloc_traits::construct(__alloc(), _CUDA_VSTD::__to_raw_pointer(__begin_-1), __x); + --__begin_; +} + +#ifndef _LIBCUDACXX_CXX03_LANG + +template +void +__split_buffer<_Tp, _Allocator>::push_front(value_type&& __x) +{ + if (__begin_ == __first_) + { + if (__end_ < __end_cap()) + { + difference_type __d = __end_cap() - __end_; + __d = (__d + 1) / 2; + __begin_ = _CUDA_VSTD::move_backward(__begin_, __end_, __end_ + __d); + __end_ += __d; + } + else + { + size_type __c = max(2 * static_cast(__end_cap() - __first_), 1); + __split_buffer __t(__c, (__c + 3) / 4, __alloc()); + __t.__construct_at_end(move_iterator(__begin_), + move_iterator(__end_)); + _CUDA_VSTD::swap(__first_, __t.__first_); + _CUDA_VSTD::swap(__begin_, __t.__begin_); + _CUDA_VSTD::swap(__end_, __t.__end_); + _CUDA_VSTD::swap(__end_cap(), __t.__end_cap()); + } + } + __alloc_traits::construct(__alloc(), _CUDA_VSTD::__to_raw_pointer(__begin_-1), + _CUDA_VSTD::move(__x)); + --__begin_; +} + +#endif // _LIBCUDACXX_CXX03_LANG + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +__split_buffer<_Tp, _Allocator>::push_back(const_reference __x) +{ + if (__end_ == __end_cap()) + { + if (__begin_ > __first_) + { + difference_type __d = __begin_ - __first_; + __d = (__d + 1) / 2; + __end_ = _CUDA_VSTD::move(__begin_, __end_, __begin_ - __d); + __begin_ -= __d; + } + else + { + size_type __c = max(2 * static_cast(__end_cap() - __first_), 1); + __split_buffer __t(__c, __c / 4, __alloc()); + __t.__construct_at_end(move_iterator(__begin_), + move_iterator(__end_)); + _CUDA_VSTD::swap(__first_, __t.__first_); + _CUDA_VSTD::swap(__begin_, __t.__begin_); + _CUDA_VSTD::swap(__end_, __t.__end_); + _CUDA_VSTD::swap(__end_cap(), __t.__end_cap()); + } + } + __alloc_traits::construct(__alloc(), _CUDA_VSTD::__to_raw_pointer(__end_), __x); + ++__end_; +} + +#ifndef _LIBCUDACXX_CXX03_LANG + +template +void +__split_buffer<_Tp, _Allocator>::push_back(value_type&& __x) +{ + if (__end_ == __end_cap()) + { + if (__begin_ > __first_) + { + difference_type __d = __begin_ - __first_; + __d = (__d + 1) / 2; + __end_ = _CUDA_VSTD::move(__begin_, __end_, __begin_ - __d); + __begin_ -= __d; + } + else + { + size_type __c = max(2 * static_cast(__end_cap() - __first_), 1); + __split_buffer __t(__c, __c / 4, __alloc()); + 
__t.__construct_at_end(move_iterator(__begin_), + move_iterator(__end_)); + _CUDA_VSTD::swap(__first_, __t.__first_); + _CUDA_VSTD::swap(__begin_, __t.__begin_); + _CUDA_VSTD::swap(__end_, __t.__end_); + _CUDA_VSTD::swap(__end_cap(), __t.__end_cap()); + } + } + __alloc_traits::construct(__alloc(), _CUDA_VSTD::__to_raw_pointer(__end_), + _CUDA_VSTD::move(__x)); + ++__end_; +} + +template +template +void +__split_buffer<_Tp, _Allocator>::emplace_back(_Args&&... __args) +{ + if (__end_ == __end_cap()) + { + if (__begin_ > __first_) + { + difference_type __d = __begin_ - __first_; + __d = (__d + 1) / 2; + __end_ = _CUDA_VSTD::move(__begin_, __end_, __begin_ - __d); + __begin_ -= __d; + } + else + { + size_type __c = max(2 * static_cast(__end_cap() - __first_), 1); + __split_buffer __t(__c, __c / 4, __alloc()); + __t.__construct_at_end(move_iterator(__begin_), + move_iterator(__end_)); + _CUDA_VSTD::swap(__first_, __t.__first_); + _CUDA_VSTD::swap(__begin_, __t.__begin_); + _CUDA_VSTD::swap(__end_, __t.__end_); + _CUDA_VSTD::swap(__end_cap(), __t.__end_cap()); + } + } + __alloc_traits::construct(__alloc(), _CUDA_VSTD::__to_raw_pointer(__end_), + _CUDA_VSTD::forward<_Args>(__args)...); + ++__end_; +} + +#endif // _LIBCUDACXX_CXX03_LANG + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(__split_buffer<_Tp, _Allocator>& __x, __split_buffer<_Tp, _Allocator>& __y) + _NOEXCEPT_(_NOEXCEPT_(__x.swap(__y))) +{ + __x.swap(__y); +} + +_LIBCUDACXX_END_NAMESPACE_STD + +_LIBCUDACXX_POP_MACROS + +#endif // _LIBCUDACXX_SPLIT_BUFFER diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__sso_allocator b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__sso_allocator new file mode 100644 index 000000000000..94f774df4159 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__sso_allocator @@ -0,0 +1,76 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCUDACXX___SSO_ALLOCATOR
+#define _LIBCUDACXX___SSO_ALLOCATOR
+
+#include <__config>
+#include <type_traits>
+#include <new>
+
+#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCUDACXX_BEGIN_NAMESPACE_STD
+
+template <class _Tp, size_t _Np> class _LIBCUDACXX_HIDDEN __sso_allocator;
+
+template <size_t _Np>
+class _LIBCUDACXX_HIDDEN __sso_allocator<void, _Np>
+{
+public:
+    typedef const void* const_pointer;
+    typedef void        value_type;
+};
+
+template <class _Tp, size_t _Np>
+class _LIBCUDACXX_HIDDEN __sso_allocator
+{
+    typename aligned_storage<sizeof(_Tp) * _Np>::type buf_;
+    bool __allocated_;
+public:
+    typedef size_t size_type;
+    typedef _Tp*   pointer;
+    typedef _Tp    value_type;
+
+    _LIBCUDACXX_INLINE_VISIBILITY __sso_allocator() throw() : __allocated_(false) {}
+    _LIBCUDACXX_INLINE_VISIBILITY __sso_allocator(const __sso_allocator&) throw() : __allocated_(false) {}
+    template <class _Up> _LIBCUDACXX_INLINE_VISIBILITY __sso_allocator(const __sso_allocator<_Up, _Np>&) throw()
+        : __allocated_(false) {}
+private:
+    __sso_allocator& operator=(const __sso_allocator&);
+public:
+    _LIBCUDACXX_INLINE_VISIBILITY pointer allocate(size_type __n, typename __sso_allocator<void, _Np>::const_pointer = 0)
+    {
+        if (!__allocated_ && __n <= _Np)
+        {
+            __allocated_ = true;
+            return (pointer)&buf_;
+        }
+        return static_cast<pointer>(_CUDA_VSTD::__libcpp_allocate(__n * sizeof(_Tp), _LIBCUDACXX_ALIGNOF(_Tp)));
+    }
+    _LIBCUDACXX_INLINE_VISIBILITY void deallocate(pointer __p, size_type __n)
+    {
+        if (__p == (pointer)&buf_)
+            __allocated_ = false;
+        else
+            _CUDA_VSTD::__libcpp_deallocate(__p, __n * sizeof(_Tp), _LIBCUDACXX_ALIGNOF(_Tp));
+    }
+    _LIBCUDACXX_INLINE_VISIBILITY size_type max_size() const throw() {return size_type(~0) / sizeof(_Tp);}
+
+    _LIBCUDACXX_INLINE_VISIBILITY
+    bool operator==(__sso_allocator& __a) const {return &buf_ == &__a.buf_;}
+    _LIBCUDACXX_INLINE_VISIBILITY
+    bool operator!=(__sso_allocator& __a) const {return &buf_ != &__a.buf_;}
+};
+
+_LIBCUDACXX_END_NAMESPACE_STD
+
+#endif // _LIBCUDACXX___SSO_ALLOCATOR
diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__std_stream b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__std_stream
new file mode 100644
index 000000000000..996db0387345
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__std_stream
@@ -0,0 +1,361 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___STD_STREAM +#define _LIBCUDACXX___STD_STREAM + +#include <__config> +#include +#include +#include <__locale> +#include + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_PUSH_MACROS +#include <__undef_macros> + + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +static const int __limit = 8; + +// __stdinbuf + +template +class _LIBCUDACXX_HIDDEN __stdinbuf + : public basic_streambuf<_CharT, char_traits<_CharT> > +{ +public: + typedef _CharT char_type; + typedef char_traits traits_type; + typedef typename traits_type::int_type int_type; + typedef typename traits_type::pos_type pos_type; + typedef typename traits_type::off_type off_type; + typedef typename traits_type::state_type state_type; + + __stdinbuf(FILE* __fp, state_type* __st); + +protected: + virtual int_type underflow(); + virtual int_type uflow(); + virtual int_type pbackfail(int_type __c = traits_type::eof()); + virtual void imbue(const locale& __loc); + +private: + + FILE* __file_; + const codecvt* __cv_; + state_type* __st_; + int __encoding_; + int_type __last_consumed_; + bool __last_consumed_is_next_; + bool __always_noconv_; + + __stdinbuf(const __stdinbuf&); + __stdinbuf& operator=(const __stdinbuf&); + + int_type __getchar(bool __consume); +}; + +template +__stdinbuf<_CharT>::__stdinbuf(FILE* __fp, state_type* __st) + : __file_(__fp), + __st_(__st), + __last_consumed_(traits_type::eof()), + __last_consumed_is_next_(false) +{ + imbue(this->getloc()); +} + +template +void +__stdinbuf<_CharT>::imbue(const locale& __loc) +{ + __cv_ = &use_facet >(__loc); + __encoding_ = __cv_->encoding(); + __always_noconv_ = __cv_->always_noconv(); + if (__encoding_ > __limit) + __throw_runtime_error("unsupported locale for standard input"); +} + +template +typename __stdinbuf<_CharT>::int_type +__stdinbuf<_CharT>::underflow() +{ + return __getchar(false); +} + +template +typename __stdinbuf<_CharT>::int_type +__stdinbuf<_CharT>::uflow() +{ + return __getchar(true); +} + +template +typename __stdinbuf<_CharT>::int_type +__stdinbuf<_CharT>::__getchar(bool __consume) +{ + if (__last_consumed_is_next_) + { + int_type __result = __last_consumed_; + if (__consume) + { + __last_consumed_ = traits_type::eof(); + __last_consumed_is_next_ = false; + } + return __result; + } + char __extbuf[__limit]; + int __nread = _CUDA_VSTD::max(1, __encoding_); + for (int __i = 0; __i < __nread; ++__i) + { + int __c = getc(__file_); + if (__c == EOF) + return traits_type::eof(); + __extbuf[__i] = static_cast(__c); + } + char_type __1buf; + if (__always_noconv_) + __1buf = static_cast(__extbuf[0]); + else + { + const char* __enxt; + char_type* __inxt; + codecvt_base::result __r; + do + { + state_type __sv_st = *__st_; + __r = __cv_->in(*__st_, __extbuf, __extbuf + __nread, __enxt, + &__1buf, &__1buf + 1, __inxt); + switch (__r) + { + case _CUDA_VSTD::codecvt_base::ok: + break; + case codecvt_base::partial: + *__st_ = __sv_st; + if (__nread == sizeof(__extbuf)) + return traits_type::eof(); + { + int __c = getc(__file_); + if (__c == EOF) + return traits_type::eof(); + __extbuf[__nread] = static_cast(__c); + } + ++__nread; + break; + case codecvt_base::error: + return traits_type::eof(); + case _CUDA_VSTD::codecvt_base::noconv: + __1buf = static_cast(__extbuf[0]); + break; + } + } while (__r == _CUDA_VSTD::codecvt_base::partial); + } + if (!__consume) + { + 
for (int __i = __nread; __i > 0;) + { + if (ungetc(traits_type::to_int_type(__extbuf[--__i]), __file_) == EOF) + return traits_type::eof(); + } + } + else + __last_consumed_ = traits_type::to_int_type(__1buf); + return traits_type::to_int_type(__1buf); +} + +template +typename __stdinbuf<_CharT>::int_type +__stdinbuf<_CharT>::pbackfail(int_type __c) +{ + if (traits_type::eq_int_type(__c, traits_type::eof())) + { + if (!__last_consumed_is_next_) + { + __c = __last_consumed_; + __last_consumed_is_next_ = !traits_type::eq_int_type(__last_consumed_, + traits_type::eof()); + } + return __c; + } + if (__last_consumed_is_next_) + { + char __extbuf[__limit]; + char* __enxt; + const char_type __ci = traits_type::to_char_type(__last_consumed_); + const char_type* __inxt; + switch (__cv_->out(*__st_, &__ci, &__ci + 1, __inxt, + __extbuf, __extbuf + sizeof(__extbuf), __enxt)) + { + case _CUDA_VSTD::codecvt_base::ok: + break; + case _CUDA_VSTD::codecvt_base::noconv: + __extbuf[0] = static_cast(__last_consumed_); + __enxt = __extbuf + 1; + break; + case codecvt_base::partial: + case codecvt_base::error: + return traits_type::eof(); + } + while (__enxt > __extbuf) + if (ungetc(*--__enxt, __file_) == EOF) + return traits_type::eof(); + } + __last_consumed_ = __c; + __last_consumed_is_next_ = true; + return __c; +} + +// __stdoutbuf + +template +class _LIBCUDACXX_HIDDEN __stdoutbuf + : public basic_streambuf<_CharT, char_traits<_CharT> > +{ +public: + typedef _CharT char_type; + typedef char_traits traits_type; + typedef typename traits_type::int_type int_type; + typedef typename traits_type::pos_type pos_type; + typedef typename traits_type::off_type off_type; + typedef typename traits_type::state_type state_type; + + __stdoutbuf(FILE* __fp, state_type* __st); + +protected: + virtual int_type overflow (int_type __c = traits_type::eof()); + virtual streamsize xsputn(const char_type* __s, streamsize __n); + virtual int sync(); + virtual void imbue(const locale& __loc); + +private: + FILE* __file_; + const codecvt* __cv_; + state_type* __st_; + bool __always_noconv_; + + __stdoutbuf(const __stdoutbuf&); + __stdoutbuf& operator=(const __stdoutbuf&); +}; + +template +__stdoutbuf<_CharT>::__stdoutbuf(FILE* __fp, state_type* __st) + : __file_(__fp), + __cv_(&use_facet >(this->getloc())), + __st_(__st), + __always_noconv_(__cv_->always_noconv()) +{ +} + +template +typename __stdoutbuf<_CharT>::int_type +__stdoutbuf<_CharT>::overflow(int_type __c) +{ + char __extbuf[__limit]; + char_type __1buf; + if (!traits_type::eq_int_type(__c, traits_type::eof())) + { + __1buf = traits_type::to_char_type(__c); + if (__always_noconv_) + { + if (fwrite(&__1buf, sizeof(char_type), 1, __file_) != 1) + return traits_type::eof(); + } + else + { + char* __extbe = __extbuf; + codecvt_base::result __r; + char_type* pbase = &__1buf; + char_type* pptr = pbase + 1; + do + { + const char_type* __e; + __r = __cv_->out(*__st_, pbase, pptr, __e, + __extbuf, + __extbuf + sizeof(__extbuf), + __extbe); + if (__e == pbase) + return traits_type::eof(); + if (__r == codecvt_base::noconv) + { + if (fwrite(pbase, 1, 1, __file_) != 1) + return traits_type::eof(); + } + else if (__r == codecvt_base::ok || __r == codecvt_base::partial) + { + size_t __nmemb = static_cast(__extbe - __extbuf); + if (fwrite(__extbuf, 1, __nmemb, __file_) != __nmemb) + return traits_type::eof(); + if (__r == codecvt_base::partial) + { + pbase = const_cast(__e); + } + } + else + return traits_type::eof(); + } while (__r == codecvt_base::partial); + } + } + return 
traits_type::not_eof(__c); +} + +template +streamsize +__stdoutbuf<_CharT>::xsputn(const char_type* __s, streamsize __n) +{ + if (__always_noconv_) + return fwrite(__s, sizeof(char_type), __n, __file_); + streamsize __i = 0; + for (; __i < __n; ++__i, ++__s) + if (overflow(traits_type::to_int_type(*__s)) == traits_type::eof()) + break; + return __i; +} + +template +int +__stdoutbuf<_CharT>::sync() +{ + char __extbuf[__limit]; + codecvt_base::result __r; + do + { + char* __extbe; + __r = __cv_->unshift(*__st_, __extbuf, + __extbuf + sizeof(__extbuf), + __extbe); + size_t __nmemb = static_cast(__extbe - __extbuf); + if (fwrite(__extbuf, 1, __nmemb, __file_) != __nmemb) + return -1; + } while (__r == codecvt_base::partial); + if (__r == codecvt_base::error) + return -1; + if (fflush(__file_)) + return -1; + return 0; +} + +template +void +__stdoutbuf<_CharT>::imbue(const locale& __loc) +{ + sync(); + __cv_ = &use_facet >(__loc); + __always_noconv_ = __cv_->always_noconv(); +} + +_LIBCUDACXX_END_NAMESPACE_STD + +_LIBCUDACXX_POP_MACROS + +#endif // _LIBCUDACXX___STD_STREAM diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__string b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__string new file mode 100644 index 000000000000..45dfbf6f7864 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__string @@ -0,0 +1,985 @@ +// -*- C++ -*- +//===-------------------------- __string ----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___STRING +#define _LIBCUDACXX___STRING + +/* + string synopsis + +namespace std +{ + +template +struct char_traits +{ + typedef charT char_type; + typedef ... int_type; + typedef streamoff off_type; + typedef streampos pos_type; + typedef mbstate_t state_type; + + static constexpr void assign(char_type& c1, const char_type& c2) noexcept; + static constexpr bool eq(char_type c1, char_type c2) noexcept; + static constexpr bool lt(char_type c1, char_type c2) noexcept; + + static constexpr int compare(const char_type* s1, const char_type* s2, size_t n); + static constexpr size_t length(const char_type* s); + static constexpr const char_type* + find(const char_type* s, size_t n, const char_type& a); + static char_type* move(char_type* s1, const char_type* s2, size_t n); + static char_type* copy(char_type* s1, const char_type* s2, size_t n); + static char_type* assign(char_type* s, size_t n, char_type a); + + static constexpr int_type not_eof(int_type c) noexcept; + static constexpr char_type to_char_type(int_type c) noexcept; + static constexpr int_type to_int_type(char_type c) noexcept; + static constexpr bool eq_int_type(int_type c1, int_type c2) noexcept; + static constexpr int_type eof() noexcept; +}; + +template <> struct char_traits; +template <> struct char_traits; +template <> struct char_traits; // c++20 + +} // std + +*/ + +#include <__config> +#include // for search and min +#include // For EOF. 
+#include // for __murmur2_or_cityhash + +#include <__debug> + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_PUSH_MACROS +#include <__undef_macros> + + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +// char_traits + +template +struct _LIBCUDACXX_TEMPLATE_VIS char_traits +{ + typedef _CharT char_type; + typedef int int_type; + typedef streamoff off_type; + typedef streampos pos_type; + typedef mbstate_t state_type; + + static inline void _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + assign(char_type& __c1, const char_type& __c2) _NOEXCEPT {__c1 = __c2;} + static inline _LIBCUDACXX_CONSTEXPR bool eq(char_type __c1, char_type __c2) _NOEXCEPT + {return __c1 == __c2;} + static inline _LIBCUDACXX_CONSTEXPR bool lt(char_type __c1, char_type __c2) _NOEXCEPT + {return __c1 < __c2;} + + static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + int compare(const char_type* __s1, const char_type* __s2, size_t __n); + _LIBCUDACXX_INLINE_VISIBILITY static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + size_t length(const char_type* __s); + _LIBCUDACXX_INLINE_VISIBILITY static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + const char_type* find(const char_type* __s, size_t __n, const char_type& __a); + static char_type* move(char_type* __s1, const char_type* __s2, size_t __n); + _LIBCUDACXX_INLINE_VISIBILITY + static char_type* copy(char_type* __s1, const char_type* __s2, size_t __n); + _LIBCUDACXX_INLINE_VISIBILITY + static char_type* assign(char_type* __s, size_t __n, char_type __a); + + static inline _LIBCUDACXX_CONSTEXPR int_type not_eof(int_type __c) _NOEXCEPT + {return eq_int_type(__c, eof()) ? ~eof() : __c;} + static inline _LIBCUDACXX_CONSTEXPR char_type to_char_type(int_type __c) _NOEXCEPT + {return char_type(__c);} + static inline _LIBCUDACXX_CONSTEXPR int_type to_int_type(char_type __c) _NOEXCEPT + {return int_type(__c);} + static inline _LIBCUDACXX_CONSTEXPR bool eq_int_type(int_type __c1, int_type __c2) _NOEXCEPT + {return __c1 == __c2;} + static inline _LIBCUDACXX_CONSTEXPR int_type eof() _NOEXCEPT + {return int_type(EOF);} +}; + +template +_LIBCUDACXX_CONSTEXPR_AFTER_CXX14 int +char_traits<_CharT>::compare(const char_type* __s1, const char_type* __s2, size_t __n) +{ + for (; __n; --__n, ++__s1, ++__s2) + { + if (lt(*__s1, *__s2)) + return -1; + if (lt(*__s2, *__s1)) + return 1; + } + return 0; +} + +template +inline +_LIBCUDACXX_CONSTEXPR_AFTER_CXX14 size_t +char_traits<_CharT>::length(const char_type* __s) +{ + size_t __len = 0; + for (; !eq(*__s, char_type(0)); ++__s) + ++__len; + return __len; +} + +template +inline +_LIBCUDACXX_CONSTEXPR_AFTER_CXX14 const _CharT* +char_traits<_CharT>::find(const char_type* __s, size_t __n, const char_type& __a) +{ + for (; __n; --__n) + { + if (eq(*__s, __a)) + return __s; + ++__s; + } + return 0; +} + +template +_CharT* +char_traits<_CharT>::move(char_type* __s1, const char_type* __s2, size_t __n) +{ + char_type* __r = __s1; + if (__s1 < __s2) + { + for (; __n; --__n, ++__s1, ++__s2) + assign(*__s1, *__s2); + } + else if (__s2 < __s1) + { + __s1 += __n; + __s2 += __n; + for (; __n; --__n) + assign(*--__s1, *--__s2); + } + return __r; +} + +template +inline +_CharT* +char_traits<_CharT>::copy(char_type* __s1, const char_type* __s2, size_t __n) +{ + _LIBCUDACXX_ASSERT(__s2 < __s1 || __s2 >= __s1+__n, "char_traits::copy overlapped range"); + char_type* __r = __s1; + for (; __n; --__n, ++__s1, ++__s2) + assign(*__s1, *__s2); + return __r; +} + +template +inline +_CharT* +char_traits<_CharT>::assign(char_type* __s, size_t __n, char_type __a) +{ + 
char_type* __r = __s; + for (; __n; --__n, ++__s) + assign(*__s, __a); + return __r; +} + +// char_traits + +template <> +struct _LIBCUDACXX_TEMPLATE_VIS char_traits +{ + typedef char char_type; + typedef int int_type; + typedef streamoff off_type; + typedef streampos pos_type; + typedef mbstate_t state_type; + + static inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + void assign(char_type& __c1, const char_type& __c2) _NOEXCEPT {__c1 = __c2;} + static inline _LIBCUDACXX_CONSTEXPR bool eq(char_type __c1, char_type __c2) _NOEXCEPT + {return __c1 == __c2;} + static inline _LIBCUDACXX_CONSTEXPR bool lt(char_type __c1, char_type __c2) _NOEXCEPT + {return (unsigned char)__c1 < (unsigned char)__c2;} + + static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + int compare(const char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT; + static inline size_t _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + length(const char_type* __s) _NOEXCEPT {return __builtin_strlen(__s);} + static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + const char_type* find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT; + static inline char_type* move(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT + {return __n == 0 ? __s1 : (char_type*) memmove(__s1, __s2, __n);} + static inline char_type* copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT + { + _LIBCUDACXX_ASSERT(__s2 < __s1 || __s2 >= __s1+__n, "char_traits::copy overlapped range"); + return __n == 0 ? __s1 : (char_type*)memcpy(__s1, __s2, __n); + } + static inline char_type* assign(char_type* __s, size_t __n, char_type __a) _NOEXCEPT + {return __n == 0 ? __s : (char_type*)memset(__s, to_int_type(__a), __n);} + + static inline _LIBCUDACXX_CONSTEXPR int_type not_eof(int_type __c) _NOEXCEPT + {return eq_int_type(__c, eof()) ? 
~eof() : __c;} + static inline _LIBCUDACXX_CONSTEXPR char_type to_char_type(int_type __c) _NOEXCEPT + {return char_type(__c);} + static inline _LIBCUDACXX_CONSTEXPR int_type to_int_type(char_type __c) _NOEXCEPT + {return int_type((unsigned char)__c);} + static inline _LIBCUDACXX_CONSTEXPR bool eq_int_type(int_type __c1, int_type __c2) _NOEXCEPT + {return __c1 == __c2;} + static inline _LIBCUDACXX_CONSTEXPR int_type eof() _NOEXCEPT + {return int_type(EOF);} +}; + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +int +char_traits::compare(const char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT +{ + if (__n == 0) + return 0; +#if __has_feature(cxx_constexpr_string_builtins) + return __builtin_memcmp(__s1, __s2, __n); +#elif _LIBCUDACXX_STD_VER <= 14 + return memcmp(__s1, __s2, __n); +#else + for (; __n; --__n, ++__s1, ++__s2) + { + if (lt(*__s1, *__s2)) + return -1; + if (lt(*__s2, *__s1)) + return 1; + } + return 0; +#endif +} + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +const char* +char_traits::find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT +{ + if (__n == 0) + return nullptr; +#if __has_feature(cxx_constexpr_string_builtins) + return __builtin_char_memchr(__s, to_int_type(__a), __n); +#elif _LIBCUDACXX_STD_VER <= 14 + return (const char_type*) memchr(__s, to_int_type(__a), __n); +#else + for (; __n; --__n) + { + if (eq(*__s, __a)) + return __s; + ++__s; + } + return nullptr; +#endif +} + + +// char_traits + +template <> +struct _LIBCUDACXX_TEMPLATE_VIS char_traits +{ + typedef wchar_t char_type; + typedef wint_t int_type; + typedef streamoff off_type; + typedef streampos pos_type; + typedef mbstate_t state_type; + + static inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + void assign(char_type& __c1, const char_type& __c2) _NOEXCEPT {__c1 = __c2;} + static inline _LIBCUDACXX_CONSTEXPR bool eq(char_type __c1, char_type __c2) _NOEXCEPT + {return __c1 == __c2;} + static inline _LIBCUDACXX_CONSTEXPR bool lt(char_type __c1, char_type __c2) _NOEXCEPT + {return __c1 < __c2;} + + static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + int compare(const char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT; + static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + size_t length(const char_type* __s) _NOEXCEPT; + static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + const char_type* find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT; + static inline char_type* move(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT + {return __n == 0 ? __s1 : (char_type*)wmemmove(__s1, __s2, __n);} + static inline char_type* copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT + { + _LIBCUDACXX_ASSERT(__s2 < __s1 || __s2 >= __s1+__n, "char_traits::copy overlapped range"); + return __n == 0 ? __s1 : (char_type*)wmemcpy(__s1, __s2, __n); + } + static inline char_type* assign(char_type* __s, size_t __n, char_type __a) _NOEXCEPT + {return __n == 0 ? __s : (char_type*)wmemset(__s, __a, __n);} + + static inline _LIBCUDACXX_CONSTEXPR int_type not_eof(int_type __c) _NOEXCEPT + {return eq_int_type(__c, eof()) ? 
~eof() : __c;} + static inline _LIBCUDACXX_CONSTEXPR char_type to_char_type(int_type __c) _NOEXCEPT + {return char_type(__c);} + static inline _LIBCUDACXX_CONSTEXPR int_type to_int_type(char_type __c) _NOEXCEPT + {return int_type(__c);} + static inline _LIBCUDACXX_CONSTEXPR bool eq_int_type(int_type __c1, int_type __c2) _NOEXCEPT + {return __c1 == __c2;} + static inline _LIBCUDACXX_CONSTEXPR int_type eof() _NOEXCEPT + {return int_type(WEOF);} +}; + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +int +char_traits::compare(const char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT +{ + if (__n == 0) + return 0; +#if __has_feature(cxx_constexpr_string_builtins) + return __builtin_wmemcmp(__s1, __s2, __n); +#elif _LIBCUDACXX_STD_VER <= 14 + return wmemcmp(__s1, __s2, __n); +#else + for (; __n; --__n, ++__s1, ++__s2) + { + if (lt(*__s1, *__s2)) + return -1; + if (lt(*__s2, *__s1)) + return 1; + } + return 0; +#endif +} + + +template +_LIBCUDACXX_INLINE_VISIBILITY +_LIBCUDACXX_CONSTEXPR +inline size_t __char_traits_length_checked(const typename _Traits::char_type* __s) _NOEXCEPT { +#if _LIBCUDACXX_DEBUG_LEVEL >= 1 + return __s ? _Traits::length(__s) : (_CUDA_VSTD::__libcpp_debug_function(_CUDA_VSTD::__libcpp_debug_info(__FILE__, __LINE__, "p == nullptr", "null pointer pass to non-null argument of char_traits<...>::length")), 0); +#else + return _Traits::length(__s); +#endif +} + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +size_t +char_traits::length(const char_type* __s) _NOEXCEPT +{ +#if __has_feature(cxx_constexpr_string_builtins) + return __builtin_wcslen(__s); +#elif _LIBCUDACXX_STD_VER <= 14 + return wcslen(__s); +#else + size_t __len = 0; + for (; !eq(*__s, char_type(0)); ++__s) + ++__len; + return __len; +#endif +} + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +const wchar_t* +char_traits::find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT +{ + if (__n == 0) + return nullptr; +#if __has_feature(cxx_constexpr_string_builtins) + return __builtin_wmemchr(__s, __a, __n); +#elif _LIBCUDACXX_STD_VER <= 14 + return wmemchr(__s, __a, __n); +#else + for (; __n; --__n) + { + if (eq(*__s, __a)) + return __s; + ++__s; + } + return nullptr; +#endif +} + + +#ifndef _LIBCUDACXX_NO_HAS_CHAR8_T + +template <> +struct _LIBCUDACXX_TEMPLATE_VIS char_traits +{ + typedef char8_t char_type; + typedef unsigned int int_type; + typedef streamoff off_type; + typedef u8streampos pos_type; + typedef mbstate_t state_type; + + static inline constexpr void assign(char_type& __c1, const char_type& __c2) noexcept + {__c1 = __c2;} + static inline constexpr bool eq(char_type __c1, char_type __c2) noexcept + {return __c1 == __c2;} + static inline constexpr bool lt(char_type __c1, char_type __c2) noexcept + {return __c1 < __c2;} + + static constexpr + int compare(const char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT; + + static constexpr + size_t length(const char_type* __s) _NOEXCEPT; + + _LIBCUDACXX_INLINE_VISIBILITY static constexpr + const char_type* find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT; + + static char_type* move(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT + {return __n == 0 ? __s1 : (char_type*) memmove(__s1, __s2, __n);} + + static char_type* copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT + { + _LIBCUDACXX_ASSERT(__s2 < __s1 || __s2 >= __s1+__n, "char_traits::copy overlapped range"); + return __n == 0 ? 
__s1 : (char_type*)memcpy(__s1, __s2, __n); + } + + static char_type* assign(char_type* __s, size_t __n, char_type __a) _NOEXCEPT + {return __n == 0 ? __s : (char_type*)memset(__s, to_int_type(__a), __n);} + + static inline constexpr int_type not_eof(int_type __c) noexcept + {return eq_int_type(__c, eof()) ? ~eof() : __c;} + static inline constexpr char_type to_char_type(int_type __c) noexcept + {return char_type(__c);} + static inline constexpr int_type to_int_type(char_type __c) noexcept + {return int_type(__c);} + static inline constexpr bool eq_int_type(int_type __c1, int_type __c2) noexcept + {return __c1 == __c2;} + static inline constexpr int_type eof() noexcept + {return int_type(EOF);} +}; + +// TODO use '__builtin_strlen' if it ever supports char8_t ?? +inline constexpr +size_t +char_traits::length(const char_type* __s) _NOEXCEPT +{ + size_t __len = 0; + for (; !eq(*__s, char_type(0)); ++__s) + ++__len; + return __len; +} + +inline constexpr +int +char_traits::compare(const char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT +{ +#if __has_feature(cxx_constexpr_string_builtins) + return __builtin_memcmp(__s1, __s2, __n); +#else + for (; __n; --__n, ++__s1, ++__s2) + { + if (lt(*__s1, *__s2)) + return -1; + if (lt(*__s2, *__s1)) + return 1; + } + return 0; +#endif +} + +// TODO use '__builtin_char_memchr' if it ever supports char8_t ?? +inline constexpr +const char8_t* +char_traits::find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT +{ + for (; __n; --__n) + { + if (eq(*__s, __a)) + return __s; + ++__s; + } + return 0; +} + +#endif // #_LIBCUDACXX_NO_HAS_CHAR8_T + +#ifndef _LIBCUDACXX_HAS_NO_UNICODE_CHARS + +template <> +struct _LIBCUDACXX_TEMPLATE_VIS char_traits +{ + typedef char16_t char_type; + typedef uint_least16_t int_type; + typedef streamoff off_type; + typedef u16streampos pos_type; + typedef mbstate_t state_type; + + static inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + void assign(char_type& __c1, const char_type& __c2) _NOEXCEPT {__c1 = __c2;} + static inline _LIBCUDACXX_CONSTEXPR bool eq(char_type __c1, char_type __c2) _NOEXCEPT + {return __c1 == __c2;} + static inline _LIBCUDACXX_CONSTEXPR bool lt(char_type __c1, char_type __c2) _NOEXCEPT + {return __c1 < __c2;} + + _LIBCUDACXX_INLINE_VISIBILITY static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + int compare(const char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + size_t length(const char_type* __s) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + const char_type* find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + static char_type* move(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + static char_type* copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + static char_type* assign(char_type* __s, size_t __n, char_type __a) _NOEXCEPT; + + static inline _LIBCUDACXX_CONSTEXPR int_type not_eof(int_type __c) _NOEXCEPT + {return eq_int_type(__c, eof()) ? 
~eof() : __c;} + static inline _LIBCUDACXX_CONSTEXPR char_type to_char_type(int_type __c) _NOEXCEPT + {return char_type(__c);} + static inline _LIBCUDACXX_CONSTEXPR int_type to_int_type(char_type __c) _NOEXCEPT + {return int_type(__c);} + static inline _LIBCUDACXX_CONSTEXPR bool eq_int_type(int_type __c1, int_type __c2) _NOEXCEPT + {return __c1 == __c2;} + static inline _LIBCUDACXX_CONSTEXPR int_type eof() _NOEXCEPT + {return int_type(0xFFFF);} +}; + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +int +char_traits::compare(const char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT +{ + for (; __n; --__n, ++__s1, ++__s2) + { + if (lt(*__s1, *__s2)) + return -1; + if (lt(*__s2, *__s1)) + return 1; + } + return 0; +} + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +size_t +char_traits::length(const char_type* __s) _NOEXCEPT +{ + size_t __len = 0; + for (; !eq(*__s, char_type(0)); ++__s) + ++__len; + return __len; +} + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +const char16_t* +char_traits::find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT +{ + for (; __n; --__n) + { + if (eq(*__s, __a)) + return __s; + ++__s; + } + return 0; +} + +inline +char16_t* +char_traits::move(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT +{ + char_type* __r = __s1; + if (__s1 < __s2) + { + for (; __n; --__n, ++__s1, ++__s2) + assign(*__s1, *__s2); + } + else if (__s2 < __s1) + { + __s1 += __n; + __s2 += __n; + for (; __n; --__n) + assign(*--__s1, *--__s2); + } + return __r; +} + +inline +char16_t* +char_traits::copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT +{ + _LIBCUDACXX_ASSERT(__s2 < __s1 || __s2 >= __s1+__n, "char_traits::copy overlapped range"); + char_type* __r = __s1; + for (; __n; --__n, ++__s1, ++__s2) + assign(*__s1, *__s2); + return __r; +} + +inline +char16_t* +char_traits::assign(char_type* __s, size_t __n, char_type __a) _NOEXCEPT +{ + char_type* __r = __s; + for (; __n; --__n, ++__s) + assign(*__s, __a); + return __r; +} + +template <> +struct _LIBCUDACXX_TEMPLATE_VIS char_traits +{ + typedef char32_t char_type; + typedef uint_least32_t int_type; + typedef streamoff off_type; + typedef u32streampos pos_type; + typedef mbstate_t state_type; + + static inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + void assign(char_type& __c1, const char_type& __c2) _NOEXCEPT {__c1 = __c2;} + static inline _LIBCUDACXX_CONSTEXPR bool eq(char_type __c1, char_type __c2) _NOEXCEPT + {return __c1 == __c2;} + static inline _LIBCUDACXX_CONSTEXPR bool lt(char_type __c1, char_type __c2) _NOEXCEPT + {return __c1 < __c2;} + + _LIBCUDACXX_INLINE_VISIBILITY static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + int compare(const char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + size_t length(const char_type* __s) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY static _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 + const char_type* find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + static char_type* move(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + static char_type* copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + static char_type* assign(char_type* __s, size_t __n, char_type __a) _NOEXCEPT; + + static inline _LIBCUDACXX_CONSTEXPR int_type not_eof(int_type __c) _NOEXCEPT + {return eq_int_type(__c, eof()) ? 
~eof() : __c;} + static inline _LIBCUDACXX_CONSTEXPR char_type to_char_type(int_type __c) _NOEXCEPT + {return char_type(__c);} + static inline _LIBCUDACXX_CONSTEXPR int_type to_int_type(char_type __c) _NOEXCEPT + {return int_type(__c);} + static inline _LIBCUDACXX_CONSTEXPR bool eq_int_type(int_type __c1, int_type __c2) _NOEXCEPT + {return __c1 == __c2;} + static inline _LIBCUDACXX_CONSTEXPR int_type eof() _NOEXCEPT + {return int_type(0xFFFFFFFF);} +}; + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +int +char_traits::compare(const char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT +{ + for (; __n; --__n, ++__s1, ++__s2) + { + if (lt(*__s1, *__s2)) + return -1; + if (lt(*__s2, *__s1)) + return 1; + } + return 0; +} + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +size_t +char_traits::length(const char_type* __s) _NOEXCEPT +{ + size_t __len = 0; + for (; !eq(*__s, char_type(0)); ++__s) + ++__len; + return __len; +} + +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX14 +const char32_t* +char_traits::find(const char_type* __s, size_t __n, const char_type& __a) _NOEXCEPT +{ + for (; __n; --__n) + { + if (eq(*__s, __a)) + return __s; + ++__s; + } + return 0; +} + +inline +char32_t* +char_traits::move(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT +{ + char_type* __r = __s1; + if (__s1 < __s2) + { + for (; __n; --__n, ++__s1, ++__s2) + assign(*__s1, *__s2); + } + else if (__s2 < __s1) + { + __s1 += __n; + __s2 += __n; + for (; __n; --__n) + assign(*--__s1, *--__s2); + } + return __r; +} + +inline +char32_t* +char_traits::copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT +{ + _LIBCUDACXX_ASSERT(__s2 < __s1 || __s2 >= __s1+__n, "char_traits::copy overlapped range"); + char_type* __r = __s1; + for (; __n; --__n, ++__s1, ++__s2) + assign(*__s1, *__s2); + return __r; +} + +inline +char32_t* +char_traits::assign(char_type* __s, size_t __n, char_type __a) _NOEXCEPT +{ + char_type* __r = __s; + for (; __n; --__n, ++__s) + assign(*__s, __a); + return __r; +} + +#endif // _LIBCUDACXX_HAS_NO_UNICODE_CHARS + +// helper fns for basic_string and string_view + +// __str_find +template +inline _SizeT _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY +__str_find(const _CharT *__p, _SizeT __sz, + _CharT __c, _SizeT __pos) _NOEXCEPT +{ + if (__pos >= __sz) + return __npos; + const _CharT* __r = _Traits::find(__p + __pos, __sz - __pos, __c); + if (__r == 0) + return __npos; + return static_cast<_SizeT>(__r - __p); +} + +template +inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 const _CharT * +__search_substring(const _CharT *__first1, const _CharT *__last1, + const _CharT *__first2, const _CharT *__last2) { + // Take advantage of knowing source and pattern lengths. + // Stop short when source is smaller than pattern. + const ptrdiff_t __len2 = __last2 - __first2; + if (__len2 == 0) + return __first1; + + ptrdiff_t __len1 = __last1 - __first1; + if (__len1 < __len2) + return __last1; + + // First element of __first2 is loop invariant. + _CharT __f2 = *__first2; + while (true) { + __len1 = __last1 - __first1; + // Check whether __first1 still has at least __len2 bytes. + if (__len1 < __len2) + return __last1; + + // Find __f2 the first byte matching in __first1. 
+ __first1 = _Traits::find(__first1, __len1 - __len2 + 1, __f2); + if (__first1 == 0) + return __last1; + + // It is faster to compare from the first byte of __first1 even if we + // already know that it matches the first byte of __first2: this is because + // __first2 is most likely aligned, as it is user's "pattern" string, and + // __first1 + 1 is most likely not aligned, as the match is in the middle of + // the string. + if (_Traits::compare(__first1, __first2, __len2) == 0) + return __first1; + + ++__first1; + } +} + +template +inline _SizeT _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY +__str_find(const _CharT *__p, _SizeT __sz, + const _CharT* __s, _SizeT __pos, _SizeT __n) _NOEXCEPT +{ + if (__pos > __sz) + return __npos; + + if (__n == 0) // There is nothing to search, just return __pos. + return __pos; + + const _CharT *__r = __search_substring<_CharT, _Traits>( + __p + __pos, __p + __sz, __s, __s + __n); + + if (__r == __p + __sz) + return __npos; + return static_cast<_SizeT>(__r - __p); +} + + +// __str_rfind + +template +inline _SizeT _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY +__str_rfind(const _CharT *__p, _SizeT __sz, + _CharT __c, _SizeT __pos) _NOEXCEPT +{ + if (__sz < 1) + return __npos; + if (__pos < __sz) + ++__pos; + else + __pos = __sz; + for (const _CharT* __ps = __p + __pos; __ps != __p;) + { + if (_Traits::eq(*--__ps, __c)) + return static_cast<_SizeT>(__ps - __p); + } + return __npos; +} + +template +inline _SizeT _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY +__str_rfind(const _CharT *__p, _SizeT __sz, + const _CharT* __s, _SizeT __pos, _SizeT __n) _NOEXCEPT +{ + __pos = _CUDA_VSTD::min(__pos, __sz); + if (__n < __sz - __pos) + __pos += __n; + else + __pos = __sz; + const _CharT* __r = _CUDA_VSTD::__find_end( + __p, __p + __pos, __s, __s + __n, _Traits::eq, + random_access_iterator_tag(), random_access_iterator_tag()); + if (__n > 0 && __r == __p + __pos) + return __npos; + return static_cast<_SizeT>(__r - __p); +} + +// __str_find_first_of +template +inline _SizeT _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY +__str_find_first_of(const _CharT *__p, _SizeT __sz, + const _CharT* __s, _SizeT __pos, _SizeT __n) _NOEXCEPT +{ + if (__pos >= __sz || __n == 0) + return __npos; + const _CharT* __r = _CUDA_VSTD::__find_first_of_ce + (__p + __pos, __p + __sz, __s, __s + __n, _Traits::eq ); + if (__r == __p + __sz) + return __npos; + return static_cast<_SizeT>(__r - __p); +} + + +// __str_find_last_of +template +inline _SizeT _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY +__str_find_last_of(const _CharT *__p, _SizeT __sz, + const _CharT* __s, _SizeT __pos, _SizeT __n) _NOEXCEPT + { + if (__n != 0) + { + if (__pos < __sz) + ++__pos; + else + __pos = __sz; + for (const _CharT* __ps = __p + __pos; __ps != __p;) + { + const _CharT* __r = _Traits::find(__s, __n, *--__ps); + if (__r) + return static_cast<_SizeT>(__ps - __p); + } + } + return __npos; +} + + +// __str_find_first_not_of +template +inline _SizeT _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY +__str_find_first_not_of(const _CharT *__p, _SizeT __sz, + const _CharT* __s, _SizeT __pos, _SizeT __n) _NOEXCEPT +{ + if (__pos < __sz) + { + const _CharT* __pe = __p + __sz; + for (const _CharT* __ps = __p + __pos; __ps != __pe; ++__ps) + if (_Traits::find(__s, __n, *__ps) == 0) + return static_cast<_SizeT>(__ps - __p); + } + return __npos; +} + + +template +inline _SizeT _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 
_LIBCUDACXX_INLINE_VISIBILITY +__str_find_first_not_of(const _CharT *__p, _SizeT __sz, + _CharT __c, _SizeT __pos) _NOEXCEPT +{ + if (__pos < __sz) + { + const _CharT* __pe = __p + __sz; + for (const _CharT* __ps = __p + __pos; __ps != __pe; ++__ps) + if (!_Traits::eq(*__ps, __c)) + return static_cast<_SizeT>(__ps - __p); + } + return __npos; +} + + +// __str_find_last_not_of +template +inline _SizeT _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY +__str_find_last_not_of(const _CharT *__p, _SizeT __sz, + const _CharT* __s, _SizeT __pos, _SizeT __n) _NOEXCEPT +{ + if (__pos < __sz) + ++__pos; + else + __pos = __sz; + for (const _CharT* __ps = __p + __pos; __ps != __p;) + if (_Traits::find(__s, __n, *--__ps) == 0) + return static_cast<_SizeT>(__ps - __p); + return __npos; +} + + +template +inline _SizeT _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY +__str_find_last_not_of(const _CharT *__p, _SizeT __sz, + _CharT __c, _SizeT __pos) _NOEXCEPT +{ + if (__pos < __sz) + ++__pos; + else + __pos = __sz; + for (const _CharT* __ps = __p + __pos; __ps != __p;) + if (!_Traits::eq(*--__ps, __c)) + return static_cast<_SizeT>(__ps - __p); + return __npos; +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +size_t __do_string_hash(_Ptr __p, _Ptr __e) +{ + typedef typename iterator_traits<_Ptr>::value_type value_type; + return __murmur2_or_cityhash()(__p, (__e-__p)*sizeof(value_type)); +} + +template > +struct __quoted_output_proxy +{ + _Iter __first; + _Iter __last; + _CharT __delim; + _CharT __escape; + + __quoted_output_proxy(_Iter __f, _Iter __l, _CharT __d, _CharT __e) + : __first(__f), __last(__l), __delim(__d), __escape(__e) {} + // This would be a nice place for a string_ref +}; + +_LIBCUDACXX_END_NAMESPACE_STD + +_LIBCUDACXX_POP_MACROS + +#endif // _LIBCUDACXX___STRING diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__threading_support b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__threading_support new file mode 100644 index 000000000000..9489f6a6bde0 --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__threading_support @@ -0,0 +1,787 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX_THREADING_SUPPORT +#define _LIBCUDACXX_THREADING_SUPPORT + +#ifndef __cuda_std__ +#include <__config> +#include +#include +#include +#include +#include <__pragma_push> +#endif + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +#if defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) +# ifndef __cuda_std__ +# include <__external_threading> +# else +# define _LIBCUDACXX_THREAD_ABI_VISIBILITY inline _LIBCUDACXX_INLINE_VISIBILITY +# endif +#elif !defined(_LIBCUDACXX_HAS_NO_THREADS) + +#if defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) +# include +# include +# include +# if defined(__APPLE__) +# include +# endif +# if defined(__linux__) +# include +# include +# include +# endif +#endif + +#if defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) +# include +# include +#endif + +#if defined(_LIBCUDACXX_HAS_THREAD_LIBRARY_EXTERNAL) || \ + defined(_LIBCUDACXX_BUILDING_THREAD_LIBRARY_EXTERNAL) +#define _LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_FUNC_VIS +#else +#define _LIBCUDACXX_THREAD_ABI_VISIBILITY inline _LIBCUDACXX_INLINE_VISIBILITY +#endif + +#if defined(__FreeBSD__) && defined(__clang__) && __has_attribute(no_thread_safety_analysis) +#define _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis)) +#else +#define _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +#endif + +typedef ::timespec __libcpp_timespec_t; +#endif // !defined(_LIBCUDACXX_HAS_NO_THREADS) + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +#if !defined(_LIBCUDACXX_HAS_NO_THREADS) + +#define _LIBCUDACXX_POLLING_COUNT 16 + +_LIBCUDACXX_INLINE_VISIBILITY +inline void __libcpp_thread_yield_processor() +{ +#if defined(__aarch64__) +# define __LIBCUDACXX_ASM_THREAD_YIELD (asm volatile ("yield" :::);) +#elif defined(__x86_64__) +# define __LIBCUDACXX_ASM_THREAD_YIELD (asm volatile ("pause" :::);) +#elif defined (__powerpc__) +# define __LIBCUDACXX_ASM_THREAD_YIELD (asm volatile ("or 27,27,27":::);) +#else +# define __LIBCUDACXX_ASM_THREAD_YIELD (;) +#endif + NV_IF_TARGET( + NV_IS_HOST, + __LIBCUDACXX_ASM_THREAD_YIELD + ) +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +void __libcpp_thread_yield(); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +void __libcpp_thread_sleep_for(chrono::nanoseconds __ns); + +template +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_thread_poll_with_backoff(_Fn && __f, chrono::nanoseconds __max = chrono::nanoseconds::zero()); + +#if defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) +// Mutex +typedef pthread_mutex_t __libcpp_mutex_t; +#define _LIBCUDACXX_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER + +typedef pthread_mutex_t __libcpp_recursive_mutex_t; + +// Condition Variable +typedef pthread_cond_t __libcpp_condvar_t; +#define _LIBCUDACXX_CONDVAR_INITIALIZER PTHREAD_COND_INITIALIZER + +// Semaphore +#if defined(__APPLE__) +typedef dispatch_semaphore_t __libcpp_semaphore_t; +# define _LIBCUDACXX_SEMAPHORE_MAX numeric_limits::max() +#else +typedef sem_t __libcpp_semaphore_t; +# define _LIBCUDACXX_SEMAPHORE_MAX SEM_VALUE_MAX +#endif + +// Execute once +typedef pthread_once_t __libcpp_exec_once_flag; +#define _LIBCUDACXX_EXEC_ONCE_INITIALIZER PTHREAD_ONCE_INIT + +// Thread id +typedef pthread_t __libcpp_thread_id; + +// Thread +#define _LIBCUDACXX_NULL_THREAD 0U + +typedef pthread_t __libcpp_thread_t; + +// Thread Local Storage +typedef pthread_key_t __libcpp_tls_key; + +#define _LIBCUDACXX_TLS_DESTRUCTOR_CC +#elif 
!defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) +// Mutex +typedef void* __libcpp_mutex_t; +#define _LIBCUDACXX_MUTEX_INITIALIZER 0 + +#if defined(_M_IX86) || defined(__i386__) || defined(_M_ARM) || defined(__arm__) +typedef void* __libcpp_recursive_mutex_t[6]; +#elif defined(_M_AMD64) || defined(__x86_64__) || defined(_M_ARM64) || defined(__aarch64__) +typedef void* __libcpp_recursive_mutex_t[5]; +#else +# error Unsupported architecture +#endif + +// Condition Variable +typedef void* __libcpp_condvar_t; +#define _LIBCUDACXX_CONDVAR_INITIALIZER 0 + +// Semaphore +typedef void* __libcpp_semaphore_t; + +// Execute Once +typedef void* __libcpp_exec_once_flag; +#define _LIBCUDACXX_EXEC_ONCE_INITIALIZER 0 + +// Thread ID +typedef long __libcpp_thread_id; + +// Thread +#define _LIBCUDACXX_NULL_THREAD 0U + +typedef void* __libcpp_thread_t; + +// Thread Local Storage +typedef long __libcpp_tls_key; + +#define _LIBCUDACXX_TLS_DESTRUCTOR_CC __stdcall +#endif // !defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) && !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) + +#if !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +__libcpp_timespec_t __libcpp_to_timespec(const chrono::nanoseconds& __ns); + +// Mutex +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_recursive_mutex_init(__libcpp_recursive_mutex_t *__m); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_recursive_mutex_lock(__libcpp_recursive_mutex_t *__m); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +bool __libcpp_recursive_mutex_trylock(__libcpp_recursive_mutex_t *__m); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_recursive_mutex_unlock(__libcpp_recursive_mutex_t *__m); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_recursive_mutex_destroy(__libcpp_recursive_mutex_t *__m); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_mutex_lock(__libcpp_mutex_t *__m); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +bool __libcpp_mutex_trylock(__libcpp_mutex_t *__m); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_mutex_unlock(__libcpp_mutex_t *__m); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_mutex_destroy(__libcpp_mutex_t *__m); + +// Condition variable +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_condvar_signal(__libcpp_condvar_t* __cv); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_condvar_broadcast(__libcpp_condvar_t* __cv); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_condvar_wait(__libcpp_condvar_t* __cv, __libcpp_mutex_t* __m); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_condvar_timedwait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m, + __libcpp_timespec_t *__ts); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_condvar_destroy(__libcpp_condvar_t* __cv); + +// Semaphore +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_semaphore_init(__libcpp_semaphore_t* __sem, int __init); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_semaphore_destroy(__libcpp_semaphore_t* __sem); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_semaphore_post(__libcpp_semaphore_t* __sem); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_semaphore_wait(__libcpp_semaphore_t* __sem); + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_semaphore_wait_timed(__libcpp_semaphore_t* __sem, chrono::nanoseconds const& __ns); + 
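+// A minimal, illustrative usage sketch of the semaphore wrappers declared
+// above (not part of the header's API surface; a host-side caller is assumed):
+//
+//     __libcpp_semaphore_t __sem;
+//     if (__libcpp_semaphore_init(&__sem, /*__init=*/0)) {
+//         __libcpp_semaphore_post(&__sem);               // signal once
+//         __libcpp_semaphore_wait(&__sem);               // consume the signal
+//         // returns false if the timeout elapses before a post:
+//         (void)__libcpp_semaphore_wait_timed(&__sem, chrono::milliseconds(1));
+//         __libcpp_semaphore_destroy(&__sem);
+//     }
+//
+// Each backend below maps these calls onto its native primitive
+// (dispatch semaphores on Apple platforms, POSIX sem_t elsewhere).
+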
+// Execute once
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_execute_once(__libcpp_exec_once_flag *flag,
+                          void (*init_routine)());
+
+// Thread id
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+bool __libcpp_thread_id_equal(__libcpp_thread_id t1, __libcpp_thread_id t2);
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+bool __libcpp_thread_id_less(__libcpp_thread_id t1, __libcpp_thread_id t2);
+
+// Thread
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+bool __libcpp_thread_isnull(const __libcpp_thread_t *__t);
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_thread_create(__libcpp_thread_t *__t, void *(*__func)(void *),
+                           void *__arg);
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+__libcpp_thread_id __libcpp_thread_get_current_id();
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+__libcpp_thread_id __libcpp_thread_get_id(const __libcpp_thread_t *__t);
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_thread_join(__libcpp_thread_t *__t);
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_thread_detach(__libcpp_thread_t *__t);
+
+// Thread local storage
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_tls_create(__libcpp_tls_key* __key,
+                        void(_LIBCUDACXX_TLS_DESTRUCTOR_CC* __at_exit)(void*));
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+void *__libcpp_tls_get(__libcpp_tls_key __key);
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_tls_set(__libcpp_tls_key __key, void *__p);
+
+#endif // !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL)
+
+#if !defined(_LIBCUDACXX_HAS_THREAD_LIBRARY_EXTERNAL) || defined(_LIBCUDACXX_BUILDING_THREAD_LIBRARY_EXTERNAL)
+
+#if defined(_LIBCUDACXX_HAS_THREAD_API_CUDA)
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+void __libcpp_thread_yield() {}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+void __libcpp_thread_sleep_for(chrono::nanoseconds __ns)
+{
+    NV_IF_TARGET(
+        NV_IS_DEVICE, (
+            auto const __step = __ns.count();
+            assert(__step < numeric_limits<unsigned>::max());
+            asm volatile("nanosleep.u32 %0;"::"r"((unsigned)__step):);
+        )
+    )
+}
+
+#elif defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD)
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+__libcpp_timespec_t __libcpp_to_timespec(const chrono::nanoseconds& __ns)
+{
+    using namespace chrono;
+    seconds __s = duration_cast<seconds>(__ns);
+    __libcpp_timespec_t __ts;
+    typedef decltype(__ts.tv_sec) ts_sec;
+    _LIBCUDACXX_CONSTEXPR ts_sec __ts_sec_max = numeric_limits<ts_sec>::max();
+
+    if (__s.count() < __ts_sec_max)
+    {
+        __ts.tv_sec = static_cast<ts_sec>(__s.count());
+        __ts.tv_nsec = static_cast<decltype(__ts.tv_nsec)>((__ns - __s).count());
+    }
+    else
+    {
+        __ts.tv_sec = __ts_sec_max;
+        __ts.tv_nsec = 999999999; // (10^9 - 1)
+    }
+    return __ts;
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_recursive_mutex_init(__libcpp_recursive_mutex_t *__m)
+{
+    pthread_mutexattr_t attr;
+    int __ec = pthread_mutexattr_init(&attr);
+    if (__ec)
+        return __ec;
+    __ec = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+    if (__ec) {
+        pthread_mutexattr_destroy(&attr);
+        return __ec;
+    }
+    __ec = pthread_mutex_init(__m, &attr);
+    if (__ec) {
+        pthread_mutexattr_destroy(&attr);
+        return __ec;
+    }
+    __ec = pthread_mutexattr_destroy(&attr);
+    if (__ec) {
+        pthread_mutex_destroy(__m);
+        return __ec;
+    }
+    return 0;
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_recursive_mutex_lock(__libcpp_recursive_mutex_t *__m)
+{
+    return pthread_mutex_lock(__m);
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+bool __libcpp_recursive_mutex_trylock(__libcpp_recursive_mutex_t *__m)
+{
+    return pthread_mutex_trylock(__m) == 0;
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_recursive_mutex_unlock(__libcpp_mutex_t *__m)
+{
+    return pthread_mutex_unlock(__m);
+}
+
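+// A minimal lifecycle sketch for the recursive-mutex wrappers above
+// (illustrative only; error codes are simply those returned by pthreads):
+//
+//     __libcpp_recursive_mutex_t __m;
+//     if (__libcpp_recursive_mutex_init(&__m) == 0) {
+//         __libcpp_recursive_mutex_lock(&__m);
+//         __libcpp_recursive_mutex_lock(&__m);    // same thread may re-lock
+//         __libcpp_recursive_mutex_unlock(&__m);
+//         __libcpp_recursive_mutex_unlock(&__m);  // one unlock per lock
+//         __libcpp_recursive_mutex_destroy(&__m);
+//     }
+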
+_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_recursive_mutex_destroy(__libcpp_recursive_mutex_t *__m) +{ + return pthread_mutex_destroy(__m); +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_mutex_lock(__libcpp_mutex_t *__m) +{ + return pthread_mutex_lock(__m); +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_mutex_trylock(__libcpp_mutex_t *__m) +{ + return pthread_mutex_trylock(__m) == 0; +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_mutex_unlock(__libcpp_mutex_t *__m) +{ + return pthread_mutex_unlock(__m); +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_mutex_destroy(__libcpp_mutex_t *__m) +{ + return pthread_mutex_destroy(__m); +} + +// Condition Variable +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_condvar_signal(__libcpp_condvar_t *__cv) +{ + return pthread_cond_signal(__cv); +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_condvar_broadcast(__libcpp_condvar_t *__cv) +{ + return pthread_cond_broadcast(__cv); +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_condvar_wait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m) +{ + return pthread_cond_wait(__cv, __m); +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_condvar_timedwait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m, + __libcpp_timespec_t *__ts) +{ + return pthread_cond_timedwait(__cv, __m, __ts); +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_condvar_destroy(__libcpp_condvar_t *__cv) +{ + return pthread_cond_destroy(__cv); +} + +// Semaphore +#if defined(__APPLE__) + +bool __libcpp_semaphore_init(__libcpp_semaphore_t* __sem, int __init) +{ + return (*__sem = dispatch_semaphore_create(__init)) != NULL; +} + +bool __libcpp_semaphore_destroy(__libcpp_semaphore_t* __sem) +{ + dispatch_release(*__sem); + return true; +} + +bool __libcpp_semaphore_post(__libcpp_semaphore_t* __sem) +{ + dispatch_semaphore_signal(*__sem); + return true; +} + +bool __libcpp_semaphore_wait(__libcpp_semaphore_t* __sem) +{ + return dispatch_semaphore_wait(*__sem, DISPATCH_TIME_FOREVER) == 0; +} + +bool __libcpp_semaphore_wait_timed(__libcpp_semaphore_t* __sem, chrono::nanoseconds const& __ns) +{ + return dispatch_semaphore_wait(*__sem, dispatch_time(DISPATCH_TIME_NOW, __ns.count())) == 0; +} + +#else + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_semaphore_init(__libcpp_semaphore_t* __sem, int __init) +{ + return sem_init(__sem, 0, __init) == 0; +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_semaphore_destroy(__libcpp_semaphore_t* __sem) +{ + return sem_destroy(__sem) == 0; +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_semaphore_post(__libcpp_semaphore_t* __sem) +{ + return sem_post(__sem) == 0; +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_semaphore_wait(__libcpp_semaphore_t* __sem) +{ + return sem_wait(__sem) == 0; +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_semaphore_wait_timed(__libcpp_semaphore_t* __sem, chrono::nanoseconds const& __ns) +{ + __libcpp_timespec_t __ts = __libcpp_to_timespec(__ns); + return sem_timedwait(__sem, &__ts) == 0; +} + +#endif //__APPLE__ + +// Execute once +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_execute_once(__libcpp_exec_once_flag *flag, void (*init_routine)()) +{ + return pthread_once(flag, init_routine); +} + +// Thread id +// Returns non-zero if the thread ids are equal, otherwise 0 +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_thread_id_equal(__libcpp_thread_id t1, __libcpp_thread_id t2) +{ + return pthread_equal(t1, t2) != 0; +} + +// Returns non-zero if t1 < t2, otherwise 0 +_LIBCUDACXX_THREAD_ABI_VISIBILITY 
+bool __libcpp_thread_id_less(__libcpp_thread_id t1, __libcpp_thread_id t2)
+{
+    return t1 < t2;
+}
+
+// Thread
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+bool __libcpp_thread_isnull(const __libcpp_thread_t *__t)
+{
+    return *__t == 0;
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_thread_create(__libcpp_thread_t *__t, void *(*__func)(void *),
+                           void *__arg)
+{
+    return pthread_create(__t, 0, __func, __arg);
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+__libcpp_thread_id __libcpp_thread_get_current_id()
+{
+    return pthread_self();
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+__libcpp_thread_id __libcpp_thread_get_id(const __libcpp_thread_t *__t)
+{
+    return *__t;
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_thread_join(__libcpp_thread_t *__t)
+{
+    return pthread_join(*__t, 0);
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_thread_detach(__libcpp_thread_t *__t)
+{
+    return pthread_detach(*__t);
+}
+
+// Thread local storage
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_tls_create(__libcpp_tls_key *__key, void (*__at_exit)(void *))
+{
+    return pthread_key_create(__key, __at_exit);
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+void *__libcpp_tls_get(__libcpp_tls_key __key)
+{
+    return pthread_getspecific(__key);
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+int __libcpp_tls_set(__libcpp_tls_key __key, void *__p)
+{
+    return pthread_setspecific(__key, __p);
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+void __libcpp_thread_yield()
+{
+    sched_yield();
+}
+
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+void __libcpp_thread_sleep_for(chrono::nanoseconds __ns)
+{
+    __libcpp_timespec_t __ts = __libcpp_to_timespec(__ns);
+    while (nanosleep(&__ts, &__ts) == -1 && errno == EINTR);
+}
+
+#if defined(__linux__) && !defined(_LIBCUDACXX_HAS_NO_PLATFORM_WAIT)
+
+#define _LIBCUDACXX_HAS_PLATFORM_WAIT
+
+typedef int __libcpp_platform_wait_t;
+
+template <class _Tp>
+struct __libcpp_platform_wait_uses_type {
+    enum { __value = is_same<typename remove_const<_Tp>::type, __libcpp_platform_wait_t>::value };
+};
+
+template <class _Tp, typename enable_if<__libcpp_platform_wait_uses_type<_Tp>::__value, int>::type = 1>
+void __libcpp_platform_wait(_Tp const* ptr, _Tp val, void const* timeout) {
+    syscall(SYS_futex, ptr, FUTEX_WAIT_PRIVATE, val, timeout, 0, 0);
+}
+
+template <class _Tp, typename enable_if<__libcpp_platform_wait_uses_type<_Tp>::__value, int>::type = 1>
+void __libcpp_platform_wake(_Tp const* ptr, bool all) {
+    syscall(SYS_futex, ptr, FUTEX_WAKE_PRIVATE, all ? INT_MAX : 1, 0, 0, 0);
+}
+
+#endif // defined(__linux__) && !defined(_LIBCUDACXX_HAS_NO_PLATFORM_WAIT)
+
+#elif defined(_LIBCUDACXX_HAS_THREAD_API_WIN32)
+
+void __libcpp_thread_yield()
+{
+    SwitchToThread();
+}
+
+void __libcpp_thread_sleep_for(chrono::nanoseconds __ns)
+{
+    using namespace chrono;
+    // round-up to the nearest millisecond
+    milliseconds __ms =
+        duration_cast<milliseconds>(__ns + chrono::nanoseconds(999999));
+    Sleep(static_cast<DWORD>(__ms.count()));
+}
+
+#endif // defined(_LIBCUDACXX_HAS_THREAD_API_WIN32)
+
+#endif // !defined(_LIBCUDACXX_HAS_THREAD_LIBRARY_EXTERNAL) || defined(_LIBCUDACXX_BUILDING_THREAD_LIBRARY_EXTERNAL)
+
+template <class _Fn>
+_LIBCUDACXX_THREAD_ABI_VISIBILITY
+bool __libcpp_thread_poll_with_backoff(_Fn && __f, chrono::nanoseconds __max)
+{
+    chrono::high_resolution_clock::time_point const __start = chrono::high_resolution_clock::now();
+    for(int __count = 0;;) {
+        if(__f())
+            return true;
+        if(__count < _LIBCUDACXX_POLLING_COUNT) {
+            if(__count > (_LIBCUDACXX_POLLING_COUNT >> 1))
+                __libcpp_thread_yield_processor();
+            __count += 1;
+            continue;
+        }
+        chrono::high_resolution_clock::duration const __elapsed = chrono::high_resolution_clock::now() - __start;
+        if(__max != chrono::nanoseconds::zero() &&
+           __max < __elapsed)
+            return false;
+        chrono::nanoseconds const __step = __elapsed / 4;
+        if(__step >= chrono::milliseconds(1))
+            __libcpp_thread_sleep_for(chrono::milliseconds(1));
+        else if(__step >= chrono::microseconds(10))
+            __libcpp_thread_sleep_for(__step);
+        else
+            __libcpp_thread_yield();
+    }
+}
+
+#if _LIBCUDACXX_STD_VER < 11
+# define _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE
+#endif
+
+#ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE
+
+struct alignas(64) __libcpp_contention_t {
+#if defined(_LIBCUDACXX_HAS_PLATFORM_WAIT)
+    ptrdiff_t __waiters = 0;
+    __libcpp_platform_wait_t __version = 0;
+#else
+    ptrdiff_t __credit = 0;
+    __libcpp_mutex_t __mutex = _LIBCUDACXX_MUTEX_INITIALIZER;
+    __libcpp_condvar_t __condvar = _LIBCUDACXX_CONDVAR_INITIALIZER;
+#endif
+};
+
+_LIBCUDACXX_FUNC_VIS
+__libcpp_contention_t * __libcpp_contention_state(void const volatile * p) _NOEXCEPT;
+
+#endif // _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE
+
+#if !defined(_LIBCUDACXX_HAS_NO_TREE_BARRIER) && !defined(_LIBCUDACXX_HAS_NO_THREAD_FAVORITE_BARRIER_INDEX)
+
+_LIBCUDACXX_EXPORTED_FROM_ABI
+extern thread_local ptrdiff_t __libcpp_thread_favorite_barrier_index;
+
+#endif
+
+#ifndef __cuda_std__
+
+class _LIBCUDACXX_TYPE_VIS thread;
+class _LIBCUDACXX_TYPE_VIS __thread_id;
+
+namespace this_thread
+{
+
+_LIBCUDACXX_INLINE_VISIBILITY __thread_id get_id() _NOEXCEPT;
+
+} // this_thread
+
+template<> struct hash<__thread_id>;
+
+class _LIBCUDACXX_TEMPLATE_VIS __thread_id
+{
+    // FIXME: pthread_t is a pointer on Darwin but a long on Linux.
+    // NULL is the no-thread value on Darwin.  Someone needs to check
+    // on other platforms.  We assume 0 works everywhere for now.
+    __libcpp_thread_id __id_;
+
+public:
+    _LIBCUDACXX_INLINE_VISIBILITY
+    __thread_id() _NOEXCEPT : __id_(0) {}
+
+    friend _LIBCUDACXX_INLINE_VISIBILITY
+    bool operator==(__thread_id __x, __thread_id __y) _NOEXCEPT
+    { // don't pass id==0 to underlying routines
+        if (__x.__id_ == 0) return __y.__id_ == 0;
+        if (__y.__id_ == 0) return false;
+        return __libcpp_thread_id_equal(__x.__id_, __y.__id_);
+    }
+    friend _LIBCUDACXX_INLINE_VISIBILITY
+    bool operator!=(__thread_id __x, __thread_id __y) _NOEXCEPT
+    {return !(__x == __y);}
+    friend _LIBCUDACXX_INLINE_VISIBILITY
+    bool operator< (__thread_id __x, __thread_id __y) _NOEXCEPT
+    { // id==0 is always less than any other thread_id
+        if (__x.__id_ == 0) return __y.__id_ != 0;
+        if (__y.__id_ == 0) return false;
+        return __libcpp_thread_id_less(__x.__id_, __y.__id_);
+    }
+    friend _LIBCUDACXX_INLINE_VISIBILITY
+    bool operator<=(__thread_id __x, __thread_id __y) _NOEXCEPT
+    {return !(__y < __x);}
+    friend _LIBCUDACXX_INLINE_VISIBILITY
+    bool operator> (__thread_id __x, __thread_id __y) _NOEXCEPT
+    {return  __y < __x ;}
+    friend _LIBCUDACXX_INLINE_VISIBILITY
+    bool operator>=(__thread_id __x, __thread_id __y) _NOEXCEPT
+    {return !(__x < __y);}
+
+    _LIBCUDACXX_INLINE_VISIBILITY
+    void __reset() { __id_ = 0; }
+
+#ifndef __cuda_std__
+    template <class _CharT, class _Traits>
+    friend
+    _LIBCUDACXX_INLINE_VISIBILITY
+    basic_ostream<_CharT, _Traits>&
+    operator<<(basic_ostream<_CharT, _Traits>& __os, __thread_id __id);
+#endif
+
+private:
+    _LIBCUDACXX_INLINE_VISIBILITY
+    __thread_id(__libcpp_thread_id __id) : __id_(__id) {}
+
+    friend __thread_id this_thread::get_id() _NOEXCEPT;
+    friend class _LIBCUDACXX_TYPE_VIS thread;
+    friend struct _LIBCUDACXX_TEMPLATE_VIS hash<__thread_id>;
+};
+
+namespace this_thread
+{
+
+inline _LIBCUDACXX_INLINE_VISIBILITY
+__thread_id
+get_id() _NOEXCEPT
+{
+    return __libcpp_thread_get_current_id();
+}
+
+} // this_thread
+
+#endif // __cuda_std__
+
+#endif // !_LIBCUDACXX_HAS_NO_THREADS
+
+_LIBCUDACXX_END_NAMESPACE_STD
+
+#ifndef __cuda_std__
+#include <__pragma_pop>
+#endif
+
+#endif // _LIBCUDACXX_THREADING_SUPPORT
diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__tree b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__tree
new file mode 100644
index 000000000000..14922a74cef0
--- /dev/null
+++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__tree
@@ -0,0 +1,2844 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCUDACXX___TREE
+#define _LIBCUDACXX___TREE
+
+#include <__config>
+#include <iterator>
+#include <memory>
+#include <stdexcept>
+#include <algorithm>
+
+#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+_LIBCUDACXX_PUSH_MACROS
+#include <__undef_macros>
+
+
+_LIBCUDACXX_BEGIN_NAMESPACE_STD
+
+#if defined(__GNUC__) && !defined(__clang__) // gcc.gnu.org/PR37804
+template <class, class, class, class> class _LIBCUDACXX_TEMPLATE_VIS map;
+template <class, class, class, class> class _LIBCUDACXX_TEMPLATE_VIS multimap;
+template <class, class, class> class _LIBCUDACXX_TEMPLATE_VIS set;
+template <class, class, class> class _LIBCUDACXX_TEMPLATE_VIS multiset;
+#endif
+
+template <class _Tp, class _Compare, class _Allocator> class __tree;
+template <class _Tp, class _NodePtr, class _DiffType>
+    class _LIBCUDACXX_TEMPLATE_VIS __tree_iterator;
+template <class _Tp, class _ConstNodePtr, class _DiffType>
+    class _LIBCUDACXX_TEMPLATE_VIS __tree_const_iterator;
+
+template <class _Pointer> class __tree_end_node;
+template <class _VoidPtr> class __tree_node_base;
+template <class _Tp, class _VoidPtr> class __tree_node;
+
+template <class _Key, class _Value>
+struct __value_type;
+
+template <class _Allocator> class __map_node_destructor;
+template <class _TreeIterator> class _LIBCUDACXX_TEMPLATE_VIS __map_iterator;
+template <class _TreeIterator> class _LIBCUDACXX_TEMPLATE_VIS __map_const_iterator;
+
+/*
+
+_NodePtr algorithms
+
+The algorithms taking _NodePtr are red black tree algorithms.  Those
+algorithms taking a parameter named __root should assume that __root
+points to a proper red black tree (unless otherwise specified).
+
+Each algorithm herein assumes that __root->__parent_ points to a non-null
+structure which has a member __left_ which points back to __root.  No other
+member is read or written to at __root->__parent_.
+
+__root->__parent_ will be referred to below (in comments only) as end_node.
+end_node->__left_ is an externally accessible lvalue for __root, and can be
+changed by node insertion and removal (without explicit reference to end_node).
+
+All nodes (with the exception of end_node), even the node referred to as
+__root, have a non-null __parent_ field.
+
+*/
+
+// Returns: true if __x is a left child of its parent, else false
+// Precondition: __x != nullptr.
+template <class _NodePtr>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+bool
+__tree_is_left_child(_NodePtr __x) _NOEXCEPT
+{
+    return __x == __x->__parent_->__left_;
+}
+
+// Determines if the subtree rooted at __x is a proper red black subtree.  If
+// __x is a proper subtree, returns the black height (null counts as 1).  If
+// __x is an improper subtree, returns 0.
+template <class _NodePtr>
+unsigned
+__tree_sub_invariant(_NodePtr __x)
+{
+    if (__x == nullptr)
+        return 1;
+    // parent consistency checked by caller
+    // check __x->__left_ consistency
+    if (__x->__left_ != nullptr && __x->__left_->__parent_ != __x)
+        return 0;
+    // check __x->__right_ consistency
+    if (__x->__right_ != nullptr && __x->__right_->__parent_ != __x)
+        return 0;
+    // check __x->__left_ != __x->__right_ unless both are nullptr
+    if (__x->__left_ == __x->__right_ && __x->__left_ != nullptr)
+        return 0;
+    // If this is red, neither child can be red
+    if (!__x->__is_black_)
+    {
+        if (__x->__left_ && !__x->__left_->__is_black_)
+            return 0;
+        if (__x->__right_ && !__x->__right_->__is_black_)
+            return 0;
+    }
+    unsigned __h = __tree_sub_invariant(__x->__left_);
+    if (__h == 0)
+        return 0;  // invalid left subtree
+    if (__h != __tree_sub_invariant(__x->__right_))
+        return 0;  // invalid or different height right subtree
+    return __h + __x->__is_black_;  // return black height of this node
+}
+
+// Determines if the red black tree rooted at __root is a proper red black tree.
+// __root == nullptr is a proper tree.  Returns true if __root is a proper
+// red black tree, else returns false.
+template <class _NodePtr>
+bool
+__tree_invariant(_NodePtr __root)
+{
+    if (__root == nullptr)
+        return true;
+    // check __x->__parent_ consistency
+    if (__root->__parent_ == nullptr)
+        return false;
+    if (!__tree_is_left_child(__root))
+        return false;
+    // root must be black
+    if (!__root->__is_black_)
+        return false;
+    // do normal node checks
+    return __tree_sub_invariant(__root) != 0;
+}
+
+// Returns: pointer to the left-most node under __x.
+// Precondition: __x != nullptr.
+template <class _NodePtr>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+_NodePtr
+__tree_min(_NodePtr __x) _NOEXCEPT
+{
+    while (__x->__left_ != nullptr)
+        __x = __x->__left_;
+    return __x;
+}
+
+// Returns: pointer to the right-most node under __x.
+// Precondition: __x != nullptr.
+template <class _NodePtr>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+_NodePtr
+__tree_max(_NodePtr __x) _NOEXCEPT
+{
+    while (__x->__right_ != nullptr)
+        __x = __x->__right_;
+    return __x;
+}
+
+// Returns: pointer to the next in-order node after __x.
+// Precondition: __x != nullptr.
+template <class _NodePtr>
+_NodePtr
+__tree_next(_NodePtr __x) _NOEXCEPT
+{
+    if (__x->__right_ != nullptr)
+        return __tree_min(__x->__right_);
+    while (!__tree_is_left_child(__x))
+        __x = __x->__parent_unsafe();
+    return __x->__parent_unsafe();
+}
+
+template <class _EndNodePtr, class _NodePtr>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+_EndNodePtr
+__tree_next_iter(_NodePtr __x) _NOEXCEPT
+{
+    if (__x->__right_ != nullptr)
+        return static_cast<_EndNodePtr>(__tree_min(__x->__right_));
+    while (!__tree_is_left_child(__x))
+        __x = __x->__parent_unsafe();
+    return static_cast<_EndNodePtr>(__x->__parent_);
+}
+
+// Returns: pointer to the previous in-order node before __x.
+// Precondition: __x != nullptr.
+// Note: __x may be the end node.
+template <class _NodePtr, class _EndNodePtr>
+inline _LIBCUDACXX_INLINE_VISIBILITY
+_NodePtr
+__tree_prev_iter(_EndNodePtr __x) _NOEXCEPT
+{
+    if (__x->__left_ != nullptr)
+        return __tree_max(__x->__left_);
+    _NodePtr __xx = static_cast<_NodePtr>(__x);
+    while (__tree_is_left_child(__xx))
+        __xx = __xx->__parent_unsafe();
+    return __xx->__parent_unsafe();
+}
+
+// Returns: pointer to a node which has no children
+// Precondition: __x != nullptr.
+template <class _NodePtr>
+_NodePtr
+__tree_leaf(_NodePtr __x) _NOEXCEPT
+{
+    while (true)
+    {
+        if (__x->__left_ != nullptr)
+        {
+            __x = __x->__left_;
+            continue;
+        }
+        if (__x->__right_ != nullptr)
+        {
+            __x = __x->__right_;
+            continue;
+        }
+        break;
+    }
+    return __x;
+}
+
+// Effects: Makes __x->__right_ the subtree root with __x as its left child
+// while preserving in-order order.
+// Precondition: __x->__right_ != nullptr
+template <class _NodePtr>
+void
+__tree_left_rotate(_NodePtr __x) _NOEXCEPT
+{
+    _NodePtr __y = __x->__right_;
+    __x->__right_ = __y->__left_;
+    if (__x->__right_ != nullptr)
+        __x->__right_->__set_parent(__x);
+    __y->__parent_ = __x->__parent_;
+    if (__tree_is_left_child(__x))
+        __x->__parent_->__left_ = __y;
+    else
+        __x->__parent_unsafe()->__right_ = __y;
+    __y->__left_ = __x;
+    __x->__set_parent(__y);
+}
+
+// Effects: Makes __x->__left_ the subtree root with __x as its right child
+// while preserving in-order order.
+// Precondition: __x->__left_ != nullptr +template +void +__tree_right_rotate(_NodePtr __x) _NOEXCEPT +{ + _NodePtr __y = __x->__left_; + __x->__left_ = __y->__right_; + if (__x->__left_ != nullptr) + __x->__left_->__set_parent(__x); + __y->__parent_ = __x->__parent_; + if (__tree_is_left_child(__x)) + __x->__parent_->__left_ = __y; + else + __x->__parent_unsafe()->__right_ = __y; + __y->__right_ = __x; + __x->__set_parent(__y); +} + +// Effects: Rebalances __root after attaching __x to a leaf. +// Precondition: __root != nulptr && __x != nullptr. +// __x has no children. +// __x == __root or == a direct or indirect child of __root. +// If __x were to be unlinked from __root (setting __root to +// nullptr if __root == __x), __tree_invariant(__root) == true. +// Postcondition: __tree_invariant(end_node->__left_) == true. end_node->__left_ +// may be different than the value passed in as __root. +template +void +__tree_balance_after_insert(_NodePtr __root, _NodePtr __x) _NOEXCEPT +{ + __x->__is_black_ = __x == __root; + while (__x != __root && !__x->__parent_unsafe()->__is_black_) + { + // __x->__parent_ != __root because __x->__parent_->__is_black == false + if (__tree_is_left_child(__x->__parent_unsafe())) + { + _NodePtr __y = __x->__parent_unsafe()->__parent_unsafe()->__right_; + if (__y != nullptr && !__y->__is_black_) + { + __x = __x->__parent_unsafe(); + __x->__is_black_ = true; + __x = __x->__parent_unsafe(); + __x->__is_black_ = __x == __root; + __y->__is_black_ = true; + } + else + { + if (!__tree_is_left_child(__x)) + { + __x = __x->__parent_unsafe(); + __tree_left_rotate(__x); + } + __x = __x->__parent_unsafe(); + __x->__is_black_ = true; + __x = __x->__parent_unsafe(); + __x->__is_black_ = false; + __tree_right_rotate(__x); + break; + } + } + else + { + _NodePtr __y = __x->__parent_unsafe()->__parent_->__left_; + if (__y != nullptr && !__y->__is_black_) + { + __x = __x->__parent_unsafe(); + __x->__is_black_ = true; + __x = __x->__parent_unsafe(); + __x->__is_black_ = __x == __root; + __y->__is_black_ = true; + } + else + { + if (__tree_is_left_child(__x)) + { + __x = __x->__parent_unsafe(); + __tree_right_rotate(__x); + } + __x = __x->__parent_unsafe(); + __x->__is_black_ = true; + __x = __x->__parent_unsafe(); + __x->__is_black_ = false; + __tree_left_rotate(__x); + break; + } + } + } +} + +// Precondition: __root != nullptr && __z != nullptr. +// __tree_invariant(__root) == true. +// __z == __root or == a direct or indirect child of __root. +// Effects: unlinks __z from the tree rooted at __root, rebalancing as needed. +// Postcondition: __tree_invariant(end_node->__left_) == true && end_node->__left_ +// nor any of its children refer to __z. end_node->__left_ +// may be different than the value passed in as __root. +template +void +__tree_remove(_NodePtr __root, _NodePtr __z) _NOEXCEPT +{ + // __z will be removed from the tree. Client still needs to destruct/deallocate it + // __y is either __z, or if __z has two children, __tree_next(__z). + // __y will have at most one child. + // __y will be the initial hole in the tree (make the hole at a leaf) + _NodePtr __y = (__z->__left_ == nullptr || __z->__right_ == nullptr) ? + __z : __tree_next(__z); + // __x is __y's possibly null single child + _NodePtr __x = __y->__left_ != nullptr ? 
__y->__left_ : __y->__right_; + // __w is __x's possibly null uncle (will become __x's sibling) + _NodePtr __w = nullptr; + // link __x to __y's parent, and find __w + if (__x != nullptr) + __x->__parent_ = __y->__parent_; + if (__tree_is_left_child(__y)) + { + __y->__parent_->__left_ = __x; + if (__y != __root) + __w = __y->__parent_unsafe()->__right_; + else + __root = __x; // __w == nullptr + } + else + { + __y->__parent_unsafe()->__right_ = __x; + // __y can't be root if it is a right child + __w = __y->__parent_->__left_; + } + bool __removed_black = __y->__is_black_; + // If we didn't remove __z, do so now by splicing in __y for __z, + // but copy __z's color. This does not impact __x or __w. + if (__y != __z) + { + // __z->__left_ != nulptr but __z->__right_ might == __x == nullptr + __y->__parent_ = __z->__parent_; + if (__tree_is_left_child(__z)) + __y->__parent_->__left_ = __y; + else + __y->__parent_unsafe()->__right_ = __y; + __y->__left_ = __z->__left_; + __y->__left_->__set_parent(__y); + __y->__right_ = __z->__right_; + if (__y->__right_ != nullptr) + __y->__right_->__set_parent(__y); + __y->__is_black_ = __z->__is_black_; + if (__root == __z) + __root = __y; + } + // There is no need to rebalance if we removed a red, or if we removed + // the last node. + if (__removed_black && __root != nullptr) + { + // Rebalance: + // __x has an implicit black color (transferred from the removed __y) + // associated with it, no matter what its color is. + // If __x is __root (in which case it can't be null), it is supposed + // to be black anyway, and if it is doubly black, then the double + // can just be ignored. + // If __x is red (in which case it can't be null), then it can absorb + // the implicit black just by setting its color to black. + // Since __y was black and only had one child (which __x points to), __x + // is either red with no children, else null, otherwise __y would have + // different black heights under left and right pointers. + // if (__x == __root || __x != nullptr && !__x->__is_black_) + if (__x != nullptr) + __x->__is_black_ = true; + else + { + // Else __x isn't root, and is "doubly black", even though it may + // be null. __w can not be null here, else the parent would + // see a black height >= 2 on the __x side and a black height + // of 1 on the __w side (__w must be a non-null black or a red + // with a non-null black child). + while (true) + { + if (!__tree_is_left_child(__w)) // if x is left child + { + if (!__w->__is_black_) + { + __w->__is_black_ = true; + __w->__parent_unsafe()->__is_black_ = false; + __tree_left_rotate(__w->__parent_unsafe()); + // __x is still valid + // reset __root only if necessary + if (__root == __w->__left_) + __root = __w; + // reset sibling, and it still can't be null + __w = __w->__left_->__right_; + } + // __w->__is_black_ is now true, __w may have null children + if ((__w->__left_ == nullptr || __w->__left_->__is_black_) && + (__w->__right_ == nullptr || __w->__right_->__is_black_)) + { + __w->__is_black_ = false; + __x = __w->__parent_unsafe(); + // __x can no longer be null + if (__x == __root || !__x->__is_black_) + { + __x->__is_black_ = true; + break; + } + // reset sibling, and it still can't be null + __w = __tree_is_left_child(__x) ? 
+ __x->__parent_unsafe()->__right_ : + __x->__parent_->__left_; + // continue; + } + else // __w has a red child + { + if (__w->__right_ == nullptr || __w->__right_->__is_black_) + { + // __w left child is non-null and red + __w->__left_->__is_black_ = true; + __w->__is_black_ = false; + __tree_right_rotate(__w); + // __w is known not to be root, so root hasn't changed + // reset sibling, and it still can't be null + __w = __w->__parent_unsafe(); + } + // __w has a right red child, left child may be null + __w->__is_black_ = __w->__parent_unsafe()->__is_black_; + __w->__parent_unsafe()->__is_black_ = true; + __w->__right_->__is_black_ = true; + __tree_left_rotate(__w->__parent_unsafe()); + break; + } + } + else + { + if (!__w->__is_black_) + { + __w->__is_black_ = true; + __w->__parent_unsafe()->__is_black_ = false; + __tree_right_rotate(__w->__parent_unsafe()); + // __x is still valid + // reset __root only if necessary + if (__root == __w->__right_) + __root = __w; + // reset sibling, and it still can't be null + __w = __w->__right_->__left_; + } + // __w->__is_black_ is now true, __w may have null children + if ((__w->__left_ == nullptr || __w->__left_->__is_black_) && + (__w->__right_ == nullptr || __w->__right_->__is_black_)) + { + __w->__is_black_ = false; + __x = __w->__parent_unsafe(); + // __x can no longer be null + if (!__x->__is_black_ || __x == __root) + { + __x->__is_black_ = true; + break; + } + // reset sibling, and it still can't be null + __w = __tree_is_left_child(__x) ? + __x->__parent_unsafe()->__right_ : + __x->__parent_->__left_; + // continue; + } + else // __w has a red child + { + if (__w->__left_ == nullptr || __w->__left_->__is_black_) + { + // __w right child is non-null and red + __w->__right_->__is_black_ = true; + __w->__is_black_ = false; + __tree_left_rotate(__w); + // __w is known not to be root, so root hasn't changed + // reset sibling, and it still can't be null + __w = __w->__parent_unsafe(); + } + // __w has a left red child, right child may be null + __w->__is_black_ = __w->__parent_unsafe()->__is_black_; + __w->__parent_unsafe()->__is_black_ = true; + __w->__left_->__is_black_ = true; + __tree_right_rotate(__w->__parent_unsafe()); + break; + } + } + } + } + } +} + +// node traits + + +#ifndef _LIBCUDACXX_CXX03_LANG +template +struct __is_tree_value_type_imp : false_type {}; + +template +struct __is_tree_value_type_imp<__value_type<_Key, _Value>> : true_type {}; + +template +struct __is_tree_value_type : false_type {}; + +template +struct __is_tree_value_type<_One> : __is_tree_value_type_imp::type> {}; +#endif + +template +struct __tree_key_value_types { + typedef _Tp key_type; + typedef _Tp __node_value_type; + typedef _Tp __container_value_type; + static const bool __is_map = false; + + _LIBCUDACXX_INLINE_VISIBILITY + static key_type const& __get_key(_Tp const& __v) { + return __v; + } + _LIBCUDACXX_INLINE_VISIBILITY + static __container_value_type const& __get_value(__node_value_type const& __v) { + return __v; + } + _LIBCUDACXX_INLINE_VISIBILITY + static __container_value_type* __get_ptr(__node_value_type& __n) { + return _CUDA_VSTD::addressof(__n); + } +#ifndef _LIBCUDACXX_CXX03_LANG + _LIBCUDACXX_INLINE_VISIBILITY + static __container_value_type&& __move(__node_value_type& __v) { + return _CUDA_VSTD::move(__v); + } +#endif +}; + +template +struct __tree_key_value_types<__value_type<_Key, _Tp> > { + typedef _Key key_type; + typedef _Tp mapped_type; + typedef __value_type<_Key, _Tp> __node_value_type; + typedef pair __container_value_type; + 
typedef __container_value_type __map_value_type; + static const bool __is_map = true; + + _LIBCUDACXX_INLINE_VISIBILITY + static key_type const& + __get_key(__node_value_type const& __t) { + return __t.__get_value().first; + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + static typename enable_if<__is_same_uncvref<_Up, __container_value_type>::value, + key_type const&>::type + __get_key(_Up& __t) { + return __t.first; + } + + _LIBCUDACXX_INLINE_VISIBILITY + static __container_value_type const& + __get_value(__node_value_type const& __t) { + return __t.__get_value(); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + static typename enable_if<__is_same_uncvref<_Up, __container_value_type>::value, + __container_value_type const&>::type + __get_value(_Up& __t) { + return __t; + } + + _LIBCUDACXX_INLINE_VISIBILITY + static __container_value_type* __get_ptr(__node_value_type& __n) { + return _CUDA_VSTD::addressof(__n.__get_value()); + } + +#ifndef _LIBCUDACXX_CXX03_LANG + _LIBCUDACXX_INLINE_VISIBILITY + static pair __move(__node_value_type& __v) { + return __v.__move(); + } +#endif +}; + +template +struct __tree_node_base_types { + typedef _VoidPtr __void_pointer; + + typedef __tree_node_base<__void_pointer> __node_base_type; + typedef typename __rebind_pointer<_VoidPtr, __node_base_type>::type + __node_base_pointer; + + typedef __tree_end_node<__node_base_pointer> __end_node_type; + typedef typename __rebind_pointer<_VoidPtr, __end_node_type>::type + __end_node_pointer; +#if defined(_LIBCUDACXX_ABI_TREE_REMOVE_NODE_POINTER_UB) + typedef __end_node_pointer __parent_pointer; +#else + typedef typename conditional< + is_pointer<__end_node_pointer>::value, + __end_node_pointer, + __node_base_pointer>::type __parent_pointer; +#endif + +private: + static_assert((is_same::element_type, void>::value), + "_VoidPtr does not point to unqualified void type"); +}; + +template , + bool = _KVTypes::__is_map> +struct __tree_map_pointer_types {}; + +template +struct __tree_map_pointer_types<_Tp, _AllocPtr, _KVTypes, true> { + typedef typename _KVTypes::__map_value_type _Mv; + typedef typename __rebind_pointer<_AllocPtr, _Mv>::type + __map_value_type_pointer; + typedef typename __rebind_pointer<_AllocPtr, const _Mv>::type + __const_map_value_type_pointer; +}; + +template ::element_type> +struct __tree_node_types; + +template +struct __tree_node_types<_NodePtr, __tree_node<_Tp, _VoidPtr> > + : public __tree_node_base_types<_VoidPtr>, + __tree_key_value_types<_Tp>, + __tree_map_pointer_types<_Tp, _VoidPtr> +{ + typedef __tree_node_base_types<_VoidPtr> __base; + typedef __tree_key_value_types<_Tp> __key_base; + typedef __tree_map_pointer_types<_Tp, _VoidPtr> __map_pointer_base; +public: + + typedef typename pointer_traits<_NodePtr>::element_type __node_type; + typedef _NodePtr __node_pointer; + + typedef _Tp __node_value_type; + typedef typename __rebind_pointer<_VoidPtr, __node_value_type>::type + __node_value_type_pointer; + typedef typename __rebind_pointer<_VoidPtr, const __node_value_type>::type + __const_node_value_type_pointer; +#if defined(_LIBCUDACXX_ABI_TREE_REMOVE_NODE_POINTER_UB) + typedef typename __base::__end_node_pointer __iter_pointer; +#else + typedef typename conditional< + is_pointer<__node_pointer>::value, + typename __base::__end_node_pointer, + __node_pointer>::type __iter_pointer; +#endif +private: + static_assert(!is_const<__node_type>::value, + "_NodePtr should never be a pointer to const"); + static_assert((is_same::type, + _NodePtr>::value), "_VoidPtr does not rebind to _NodePtr."); 
+}; + +template +struct __make_tree_node_types { + typedef typename __rebind_pointer<_VoidPtr, __tree_node<_ValueTp, _VoidPtr> >::type + _NodePtr; + typedef __tree_node_types<_NodePtr> type; +}; + +// node + +template +class __tree_end_node +{ +public: + typedef _Pointer pointer; + pointer __left_; + + _LIBCUDACXX_INLINE_VISIBILITY + __tree_end_node() _NOEXCEPT : __left_() {} +}; + +template +class __tree_node_base + : public __tree_node_base_types<_VoidPtr>::__end_node_type +{ + typedef __tree_node_base_types<_VoidPtr> _NodeBaseTypes; + +public: + typedef typename _NodeBaseTypes::__node_base_pointer pointer; + typedef typename _NodeBaseTypes::__parent_pointer __parent_pointer; + + pointer __right_; + __parent_pointer __parent_; + bool __is_black_; + + _LIBCUDACXX_INLINE_VISIBILITY + pointer __parent_unsafe() const { return static_cast(__parent_);} + + _LIBCUDACXX_INLINE_VISIBILITY + void __set_parent(pointer __p) { + __parent_ = static_cast<__parent_pointer>(__p); + } + +private: + ~__tree_node_base() _LIBCUDACXX_EQUAL_DELETE; + __tree_node_base(__tree_node_base const&) _LIBCUDACXX_EQUAL_DELETE; + __tree_node_base& operator=(__tree_node_base const&) _LIBCUDACXX_EQUAL_DELETE; +}; + +template +class __tree_node + : public __tree_node_base<_VoidPtr> +{ +public: + typedef _Tp __node_value_type; + + __node_value_type __value_; + +private: + ~__tree_node() _LIBCUDACXX_EQUAL_DELETE; + __tree_node(__tree_node const&) _LIBCUDACXX_EQUAL_DELETE; + __tree_node& operator=(__tree_node const&) _LIBCUDACXX_EQUAL_DELETE; +}; + + +template +class __tree_node_destructor +{ + typedef _Allocator allocator_type; + typedef allocator_traits __alloc_traits; + +public: + typedef typename __alloc_traits::pointer pointer; +private: + typedef __tree_node_types _NodeTypes; + allocator_type& __na_; + +public: + bool __value_constructed; + + __tree_node_destructor(const __tree_node_destructor &) = default; + __tree_node_destructor& operator=(const __tree_node_destructor&) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + explicit __tree_node_destructor(allocator_type& __na, bool __val = false) _NOEXCEPT + : __na_(__na), + __value_constructed(__val) + {} + + _LIBCUDACXX_INLINE_VISIBILITY + void operator()(pointer __p) _NOEXCEPT + { + if (__value_constructed) + __alloc_traits::destroy(__na_, _NodeTypes::__get_ptr(__p->__value_)); + if (__p) + __alloc_traits::deallocate(__na_, __p, 1); + } + + template friend class __map_node_destructor; +}; + +#if _LIBCUDACXX_STD_VER > 14 +template +struct __generic_container_node_destructor; +template +struct __generic_container_node_destructor<__tree_node<_Tp, _VoidPtr>, _Alloc> + : __tree_node_destructor<_Alloc> +{ + using __tree_node_destructor<_Alloc>::__tree_node_destructor; +}; +#endif + +template +class _LIBCUDACXX_TEMPLATE_VIS __tree_iterator +{ + typedef __tree_node_types<_NodePtr> _NodeTypes; + typedef _NodePtr __node_pointer; + typedef typename _NodeTypes::__node_base_pointer __node_base_pointer; + typedef typename _NodeTypes::__end_node_pointer __end_node_pointer; + typedef typename _NodeTypes::__iter_pointer __iter_pointer; + typedef pointer_traits<__node_pointer> __pointer_traits; + + __iter_pointer __ptr_; + +public: + typedef bidirectional_iterator_tag iterator_category; + typedef _Tp value_type; + typedef _DiffType difference_type; + typedef value_type& reference; + typedef typename _NodeTypes::__node_value_type_pointer pointer; + + _LIBCUDACXX_INLINE_VISIBILITY __tree_iterator() _NOEXCEPT +#if _LIBCUDACXX_STD_VER > 11 + : __ptr_(nullptr) +#endif + {} + + 
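+    // Editorial illustration (not upstream code): an instantiated tree yields
+    // these iterators in key order regardless of insertion order, e.g.
+    //
+    //     __tree<int, less<int>, allocator<int> > __t((less<int>()));
+    //     __t.__insert_multi(2); __t.__insert_multi(1); __t.__insert_multi(3);
+    //     for (__tree<int, less<int>, allocator<int> >::iterator __i = __t.begin();
+    //          __i != __t.end(); ++__i)
+    //         ;                       // visits 1, 2, 3
+    //
+    // operator++ and operator-- below walk the red-black tree in order via
+    // __tree_next_iter and __tree_prev_iter.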
_LIBCUDACXX_INLINE_VISIBILITY reference operator*() const + {return __get_np()->__value_;} + _LIBCUDACXX_INLINE_VISIBILITY pointer operator->() const + {return pointer_traits::pointer_to(__get_np()->__value_);} + + _LIBCUDACXX_INLINE_VISIBILITY + __tree_iterator& operator++() { + __ptr_ = static_cast<__iter_pointer>( + __tree_next_iter<__end_node_pointer>(static_cast<__node_base_pointer>(__ptr_))); + return *this; + } + _LIBCUDACXX_INLINE_VISIBILITY + __tree_iterator operator++(int) + {__tree_iterator __t(*this); ++(*this); return __t;} + + _LIBCUDACXX_INLINE_VISIBILITY + __tree_iterator& operator--() { + __ptr_ = static_cast<__iter_pointer>(__tree_prev_iter<__node_base_pointer>( + static_cast<__end_node_pointer>(__ptr_))); + return *this; + } + _LIBCUDACXX_INLINE_VISIBILITY + __tree_iterator operator--(int) + {__tree_iterator __t(*this); --(*this); return __t;} + + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator==(const __tree_iterator& __x, const __tree_iterator& __y) + {return __x.__ptr_ == __y.__ptr_;} + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator!=(const __tree_iterator& __x, const __tree_iterator& __y) + {return !(__x == __y);} + +private: + _LIBCUDACXX_INLINE_VISIBILITY + explicit __tree_iterator(__node_pointer __p) _NOEXCEPT : __ptr_(__p) {} + _LIBCUDACXX_INLINE_VISIBILITY + explicit __tree_iterator(__end_node_pointer __p) _NOEXCEPT : __ptr_(__p) {} + _LIBCUDACXX_INLINE_VISIBILITY + __node_pointer __get_np() const { return static_cast<__node_pointer>(__ptr_); } + template friend class __tree; + template friend class _LIBCUDACXX_TEMPLATE_VIS __tree_const_iterator; + template friend class _LIBCUDACXX_TEMPLATE_VIS __map_iterator; + template friend class _LIBCUDACXX_TEMPLATE_VIS map; + template friend class _LIBCUDACXX_TEMPLATE_VIS multimap; + template friend class _LIBCUDACXX_TEMPLATE_VIS set; + template friend class _LIBCUDACXX_TEMPLATE_VIS multiset; +}; + +template +class _LIBCUDACXX_TEMPLATE_VIS __tree_const_iterator +{ + typedef __tree_node_types<_NodePtr> _NodeTypes; + typedef typename _NodeTypes::__node_pointer __node_pointer; + typedef typename _NodeTypes::__node_base_pointer __node_base_pointer; + typedef typename _NodeTypes::__end_node_pointer __end_node_pointer; + typedef typename _NodeTypes::__iter_pointer __iter_pointer; + typedef pointer_traits<__node_pointer> __pointer_traits; + + __iter_pointer __ptr_; + +public: + typedef bidirectional_iterator_tag iterator_category; + typedef _Tp value_type; + typedef _DiffType difference_type; + typedef const value_type& reference; + typedef typename _NodeTypes::__const_node_value_type_pointer pointer; + + _LIBCUDACXX_INLINE_VISIBILITY __tree_const_iterator() _NOEXCEPT +#if _LIBCUDACXX_STD_VER > 11 + : __ptr_(nullptr) +#endif + {} + +private: + typedef __tree_iterator + __non_const_iterator; +public: + _LIBCUDACXX_INLINE_VISIBILITY + __tree_const_iterator(__non_const_iterator __p) _NOEXCEPT + : __ptr_(__p.__ptr_) {} + + _LIBCUDACXX_INLINE_VISIBILITY reference operator*() const + {return __get_np()->__value_;} + _LIBCUDACXX_INLINE_VISIBILITY pointer operator->() const + {return pointer_traits::pointer_to(__get_np()->__value_);} + + _LIBCUDACXX_INLINE_VISIBILITY + __tree_const_iterator& operator++() { + __ptr_ = static_cast<__iter_pointer>( + __tree_next_iter<__end_node_pointer>(static_cast<__node_base_pointer>(__ptr_))); + return *this; + } + + _LIBCUDACXX_INLINE_VISIBILITY + __tree_const_iterator operator++(int) + {__tree_const_iterator __t(*this); ++(*this); return __t;} + + _LIBCUDACXX_INLINE_VISIBILITY + 
__tree_const_iterator& operator--() { + __ptr_ = static_cast<__iter_pointer>(__tree_prev_iter<__node_base_pointer>( + static_cast<__end_node_pointer>(__ptr_))); + return *this; + } + + _LIBCUDACXX_INLINE_VISIBILITY + __tree_const_iterator operator--(int) + {__tree_const_iterator __t(*this); --(*this); return __t;} + + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator==(const __tree_const_iterator& __x, const __tree_const_iterator& __y) + {return __x.__ptr_ == __y.__ptr_;} + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator!=(const __tree_const_iterator& __x, const __tree_const_iterator& __y) + {return !(__x == __y);} + +private: + _LIBCUDACXX_INLINE_VISIBILITY + explicit __tree_const_iterator(__node_pointer __p) _NOEXCEPT + : __ptr_(__p) {} + _LIBCUDACXX_INLINE_VISIBILITY + explicit __tree_const_iterator(__end_node_pointer __p) _NOEXCEPT + : __ptr_(__p) {} + _LIBCUDACXX_INLINE_VISIBILITY + __node_pointer __get_np() const { return static_cast<__node_pointer>(__ptr_); } + + template friend class __tree; + template friend class _LIBCUDACXX_TEMPLATE_VIS map; + template friend class _LIBCUDACXX_TEMPLATE_VIS multimap; + template friend class _LIBCUDACXX_TEMPLATE_VIS set; + template friend class _LIBCUDACXX_TEMPLATE_VIS multiset; + template friend class _LIBCUDACXX_TEMPLATE_VIS __map_const_iterator; + +}; + +template +#ifndef _LIBCUDACXX_CXX03_LANG + _LIBCUDACXX_DIAGNOSE_WARNING(!std::__invokable<_Compare const&, _Tp const&, _Tp const&>::value, + "the specified comparator type does not provide a viable const call operator") +#endif +int __diagnose_non_const_comparator(); + +template +class __tree +{ +public: + typedef _Tp value_type; + typedef _Compare value_compare; + typedef _Allocator allocator_type; + +private: + typedef allocator_traits __alloc_traits; + typedef typename __make_tree_node_types::type + _NodeTypes; + typedef typename _NodeTypes::key_type key_type; +public: + typedef typename _NodeTypes::__node_value_type __node_value_type; + typedef typename _NodeTypes::__container_value_type __container_value_type; + + typedef typename __alloc_traits::pointer pointer; + typedef typename __alloc_traits::const_pointer const_pointer; + typedef typename __alloc_traits::size_type size_type; + typedef typename __alloc_traits::difference_type difference_type; + +public: + typedef typename _NodeTypes::__void_pointer __void_pointer; + + typedef typename _NodeTypes::__node_type __node; + typedef typename _NodeTypes::__node_pointer __node_pointer; + + typedef typename _NodeTypes::__node_base_type __node_base; + typedef typename _NodeTypes::__node_base_pointer __node_base_pointer; + + typedef typename _NodeTypes::__end_node_type __end_node_t; + typedef typename _NodeTypes::__end_node_pointer __end_node_ptr; + + typedef typename _NodeTypes::__parent_pointer __parent_pointer; + typedef typename _NodeTypes::__iter_pointer __iter_pointer; + + typedef typename __rebind_alloc_helper<__alloc_traits, __node>::type __node_allocator; + typedef allocator_traits<__node_allocator> __node_traits; + +private: + // check for sane allocator pointer rebinding semantics. Rebinding the + // allocator for a new pointer type should be exactly the same as rebinding + // the pointer using 'pointer_traits'. 
+ static_assert((is_same<__node_pointer, typename __node_traits::pointer>::value), + "Allocator does not rebind pointers in a sane manner."); + typedef typename __rebind_alloc_helper<__node_traits, __node_base>::type + __node_base_allocator; + typedef allocator_traits<__node_base_allocator> __node_base_traits; + static_assert((is_same<__node_base_pointer, typename __node_base_traits::pointer>::value), + "Allocator does not rebind pointers in a sane manner."); + +private: + __iter_pointer __begin_node_; + __compressed_pair<__end_node_t, __node_allocator> __pair1_; + __compressed_pair __pair3_; + +public: + _LIBCUDACXX_INLINE_VISIBILITY + __iter_pointer __end_node() _NOEXCEPT + { + return static_cast<__iter_pointer>( + pointer_traits<__end_node_ptr>::pointer_to(__pair1_.first()) + ); + } + _LIBCUDACXX_INLINE_VISIBILITY + __iter_pointer __end_node() const _NOEXCEPT + { + return static_cast<__iter_pointer>( + pointer_traits<__end_node_ptr>::pointer_to( + const_cast<__end_node_t&>(__pair1_.first()) + ) + ); + } + _LIBCUDACXX_INLINE_VISIBILITY + __node_allocator& __node_alloc() _NOEXCEPT {return __pair1_.second();} +private: + _LIBCUDACXX_INLINE_VISIBILITY + const __node_allocator& __node_alloc() const _NOEXCEPT + {return __pair1_.second();} + _LIBCUDACXX_INLINE_VISIBILITY + __iter_pointer& __begin_node() _NOEXCEPT {return __begin_node_;} + _LIBCUDACXX_INLINE_VISIBILITY + const __iter_pointer& __begin_node() const _NOEXCEPT {return __begin_node_;} +public: + _LIBCUDACXX_INLINE_VISIBILITY + allocator_type __alloc() const _NOEXCEPT + {return allocator_type(__node_alloc());} +private: + _LIBCUDACXX_INLINE_VISIBILITY + size_type& size() _NOEXCEPT {return __pair3_.first();} +public: + _LIBCUDACXX_INLINE_VISIBILITY + const size_type& size() const _NOEXCEPT {return __pair3_.first();} + _LIBCUDACXX_INLINE_VISIBILITY + value_compare& value_comp() _NOEXCEPT {return __pair3_.second();} + _LIBCUDACXX_INLINE_VISIBILITY + const value_compare& value_comp() const _NOEXCEPT + {return __pair3_.second();} +public: + + _LIBCUDACXX_INLINE_VISIBILITY + __node_pointer __root() const _NOEXCEPT + {return static_cast<__node_pointer>(__end_node()->__left_);} + + __node_base_pointer* __root_ptr() const _NOEXCEPT { + return _CUDA_VSTD::addressof(__end_node()->__left_); + } + + typedef __tree_iterator iterator; + typedef __tree_const_iterator const_iterator; + + explicit __tree(const value_compare& __comp) + _NOEXCEPT_( + is_nothrow_default_constructible<__node_allocator>::value && + is_nothrow_copy_constructible::value); + explicit __tree(const allocator_type& __a); + __tree(const value_compare& __comp, const allocator_type& __a); + __tree(const __tree& __t); + __tree& operator=(const __tree& __t); + template + void __assign_unique(_ForwardIterator __first, _ForwardIterator __last); + template + void __assign_multi(_InputIterator __first, _InputIterator __last); +#ifndef _LIBCUDACXX_CXX03_LANG + __tree(__tree&& __t) + _NOEXCEPT_( + is_nothrow_move_constructible<__node_allocator>::value && + is_nothrow_move_constructible::value); + __tree(__tree&& __t, const allocator_type& __a); + __tree& operator=(__tree&& __t) + _NOEXCEPT_( + __node_traits::propagate_on_container_move_assignment::value && + is_nothrow_move_assignable::value && + is_nothrow_move_assignable<__node_allocator>::value); +#endif // _LIBCUDACXX_CXX03_LANG + + ~__tree(); + + _LIBCUDACXX_INLINE_VISIBILITY + iterator begin() _NOEXCEPT {return iterator(__begin_node());} + _LIBCUDACXX_INLINE_VISIBILITY + const_iterator begin() const _NOEXCEPT {return 
const_iterator(__begin_node());} + _LIBCUDACXX_INLINE_VISIBILITY + iterator end() _NOEXCEPT {return iterator(__end_node());} + _LIBCUDACXX_INLINE_VISIBILITY + const_iterator end() const _NOEXCEPT {return const_iterator(__end_node());} + + _LIBCUDACXX_INLINE_VISIBILITY + size_type max_size() const _NOEXCEPT + {return std::min( + __node_traits::max_size(__node_alloc()), + numeric_limits::max());} + + void clear() _NOEXCEPT; + + void swap(__tree& __t) +#if _LIBCUDACXX_STD_VER <= 11 + _NOEXCEPT_( + __is_nothrow_swappable::value + && (!__node_traits::propagate_on_container_swap::value || + __is_nothrow_swappable<__node_allocator>::value) + ); +#else + _NOEXCEPT_(__is_nothrow_swappable::value); +#endif + +#ifndef _LIBCUDACXX_CXX03_LANG + template + pair + __emplace_unique_key_args(_Key const&, _Args&&... __args); + template + iterator + __emplace_hint_unique_key_args(const_iterator, _Key const&, _Args&&...); + + template + pair __emplace_unique_impl(_Args&&... __args); + + template + iterator __emplace_hint_unique_impl(const_iterator __p, _Args&&... __args); + + template + iterator __emplace_multi(_Args&&... __args); + + template + iterator __emplace_hint_multi(const_iterator __p, _Args&&... __args); + + template + _LIBCUDACXX_INLINE_VISIBILITY + pair __emplace_unique(_Pp&& __x) { + return __emplace_unique_extract_key(_CUDA_VSTD::forward<_Pp>(__x), + __can_extract_key<_Pp, key_type>()); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename enable_if< + __can_extract_map_key<_First, key_type, __container_value_type>::value, + pair + >::type __emplace_unique(_First&& __f, _Second&& __s) { + return __emplace_unique_key_args(__f, _CUDA_VSTD::forward<_First>(__f), + _CUDA_VSTD::forward<_Second>(__s)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + pair __emplace_unique(_Args&&... __args) { + return __emplace_unique_impl(_CUDA_VSTD::forward<_Args>(__args)...); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + pair + __emplace_unique_extract_key(_Pp&& __x, __extract_key_fail_tag) { + return __emplace_unique_impl(_CUDA_VSTD::forward<_Pp>(__x)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + pair + __emplace_unique_extract_key(_Pp&& __x, __extract_key_self_tag) { + return __emplace_unique_key_args(__x, _CUDA_VSTD::forward<_Pp>(__x)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + pair + __emplace_unique_extract_key(_Pp&& __x, __extract_key_first_tag) { + return __emplace_unique_key_args(__x.first, _CUDA_VSTD::forward<_Pp>(__x)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __emplace_hint_unique(const_iterator __p, _Pp&& __x) { + return __emplace_hint_unique_extract_key(__p, _CUDA_VSTD::forward<_Pp>(__x), + __can_extract_key<_Pp, key_type>()); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + typename enable_if< + __can_extract_map_key<_First, key_type, __container_value_type>::value, + iterator + >::type __emplace_hint_unique(const_iterator __p, _First&& __f, _Second&& __s) { + return __emplace_hint_unique_key_args(__p, __f, + _CUDA_VSTD::forward<_First>(__f), + _CUDA_VSTD::forward<_Second>(__s)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __emplace_hint_unique(const_iterator __p, _Args&&... 
__args) { + return __emplace_hint_unique_impl(__p, _CUDA_VSTD::forward<_Args>(__args)...); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator + __emplace_hint_unique_extract_key(const_iterator __p, _Pp&& __x, __extract_key_fail_tag) { + return __emplace_hint_unique_impl(__p, _CUDA_VSTD::forward<_Pp>(__x)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator + __emplace_hint_unique_extract_key(const_iterator __p, _Pp&& __x, __extract_key_self_tag) { + return __emplace_hint_unique_key_args(__p, __x, _CUDA_VSTD::forward<_Pp>(__x)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator + __emplace_hint_unique_extract_key(const_iterator __p, _Pp&& __x, __extract_key_first_tag) { + return __emplace_hint_unique_key_args(__p, __x.first, _CUDA_VSTD::forward<_Pp>(__x)); + } + +#else + template + _LIBCUDACXX_INLINE_VISIBILITY + pair __emplace_unique_key_args(_Key const&, _Args& __args); + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __emplace_hint_unique_key_args(const_iterator, _Key const&, _Args&); +#endif + + _LIBCUDACXX_INLINE_VISIBILITY + pair __insert_unique(const __container_value_type& __v) { + return __emplace_unique_key_args(_NodeTypes::__get_key(__v), __v); + } + + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_unique(const_iterator __p, const __container_value_type& __v) { + return __emplace_hint_unique_key_args(__p, _NodeTypes::__get_key(__v), __v); + } + +#ifdef _LIBCUDACXX_CXX03_LANG + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_multi(const __container_value_type& __v); + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_multi(const_iterator __p, const __container_value_type& __v); +#else + _LIBCUDACXX_INLINE_VISIBILITY + pair __insert_unique(__container_value_type&& __v) { + return __emplace_unique_key_args(_NodeTypes::__get_key(__v), _CUDA_VSTD::move(__v)); + } + + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_unique(const_iterator __p, __container_value_type&& __v) { + return __emplace_hint_unique_key_args(__p, _NodeTypes::__get_key(__v), _CUDA_VSTD::move(__v)); + } + + template ::type, + __container_value_type + >::value + >::type> + _LIBCUDACXX_INLINE_VISIBILITY + pair __insert_unique(_Vp&& __v) { + return __emplace_unique(_CUDA_VSTD::forward<_Vp>(__v)); + } + + template ::type, + __container_value_type + >::value + >::type> + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_unique(const_iterator __p, _Vp&& __v) { + return __emplace_hint_unique(__p, _CUDA_VSTD::forward<_Vp>(__v)); + } + + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_multi(__container_value_type&& __v) { + return __emplace_multi(_CUDA_VSTD::move(__v)); + } + + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_multi(const_iterator __p, __container_value_type&& __v) { + return __emplace_hint_multi(__p, _CUDA_VSTD::move(__v)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_multi(_Vp&& __v) { + return __emplace_multi(_CUDA_VSTD::forward<_Vp>(__v)); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __insert_multi(const_iterator __p, _Vp&& __v) { + return __emplace_hint_multi(__p, _CUDA_VSTD::forward<_Vp>(__v)); + } + +#endif // !_LIBCUDACXX_CXX03_LANG + + _LIBCUDACXX_INLINE_VISIBILITY + pair __node_assign_unique(const __container_value_type& __v, __node_pointer __dest); + + _LIBCUDACXX_INLINE_VISIBILITY + iterator __node_insert_multi(__node_pointer __nd); + _LIBCUDACXX_INLINE_VISIBILITY + iterator __node_insert_multi(const_iterator __p, __node_pointer __nd); + + + _LIBCUDACXX_INLINE_VISIBILITY iterator + 
__remove_node_pointer(__node_pointer) _NOEXCEPT; + +#if _LIBCUDACXX_STD_VER > 14 + template + _LIBCUDACXX_INLINE_VISIBILITY + _InsertReturnType __node_handle_insert_unique(_NodeHandle&&); + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __node_handle_insert_unique(const_iterator, _NodeHandle&&); + template + _LIBCUDACXX_INLINE_VISIBILITY + void __node_handle_merge_unique(_Tree& __source); + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __node_handle_insert_multi(_NodeHandle&&); + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator __node_handle_insert_multi(const_iterator, _NodeHandle&&); + template + _LIBCUDACXX_INLINE_VISIBILITY + void __node_handle_merge_multi(_Tree& __source); + + + template + _LIBCUDACXX_INLINE_VISIBILITY + _NodeHandle __node_handle_extract(key_type const&); + template + _LIBCUDACXX_INLINE_VISIBILITY + _NodeHandle __node_handle_extract(const_iterator); +#endif + + iterator erase(const_iterator __p); + iterator erase(const_iterator __f, const_iterator __l); + template + size_type __erase_unique(const _Key& __k); + template + size_type __erase_multi(const _Key& __k); + + void __insert_node_at(__parent_pointer __parent, + __node_base_pointer& __child, + __node_base_pointer __new_node) _NOEXCEPT; + + template + iterator find(const _Key& __v); + template + const_iterator find(const _Key& __v) const; + + template + size_type __count_unique(const _Key& __k) const; + template + size_type __count_multi(const _Key& __k) const; + + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator lower_bound(const _Key& __v) + {return __lower_bound(__v, __root(), __end_node());} + template + iterator __lower_bound(const _Key& __v, + __node_pointer __root, + __iter_pointer __result); + template + _LIBCUDACXX_INLINE_VISIBILITY + const_iterator lower_bound(const _Key& __v) const + {return __lower_bound(__v, __root(), __end_node());} + template + const_iterator __lower_bound(const _Key& __v, + __node_pointer __root, + __iter_pointer __result) const; + template + _LIBCUDACXX_INLINE_VISIBILITY + iterator upper_bound(const _Key& __v) + {return __upper_bound(__v, __root(), __end_node());} + template + iterator __upper_bound(const _Key& __v, + __node_pointer __root, + __iter_pointer __result); + template + _LIBCUDACXX_INLINE_VISIBILITY + const_iterator upper_bound(const _Key& __v) const + {return __upper_bound(__v, __root(), __end_node());} + template + const_iterator __upper_bound(const _Key& __v, + __node_pointer __root, + __iter_pointer __result) const; + template + pair + __equal_range_unique(const _Key& __k); + template + pair + __equal_range_unique(const _Key& __k) const; + + template + pair + __equal_range_multi(const _Key& __k); + template + pair + __equal_range_multi(const _Key& __k) const; + + typedef __tree_node_destructor<__node_allocator> _Dp; + typedef unique_ptr<__node, _Dp> __node_holder; + + __node_holder remove(const_iterator __p) _NOEXCEPT; +private: + __node_base_pointer& + __find_leaf_low(__parent_pointer& __parent, const key_type& __v); + __node_base_pointer& + __find_leaf_high(__parent_pointer& __parent, const key_type& __v); + __node_base_pointer& + __find_leaf(const_iterator __hint, + __parent_pointer& __parent, const key_type& __v); + // FIXME: Make this function const qualified. Unfortunetly doing so + // breaks existing code which uses non-const callable comparators. 
+ template + __node_base_pointer& + __find_equal(__parent_pointer& __parent, const _Key& __v); + template + _LIBCUDACXX_INLINE_VISIBILITY __node_base_pointer& + __find_equal(__parent_pointer& __parent, const _Key& __v) const { + return const_cast<__tree*>(this)->__find_equal(__parent, __v); + } + template + __node_base_pointer& + __find_equal(const_iterator __hint, __parent_pointer& __parent, + __node_base_pointer& __dummy, + const _Key& __v); + +#ifndef _LIBCUDACXX_CXX03_LANG + template + __node_holder __construct_node(_Args&& ...__args); +#else + __node_holder __construct_node(const __container_value_type& __v); +#endif + + void destroy(__node_pointer __nd) _NOEXCEPT; + + _LIBCUDACXX_INLINE_VISIBILITY + void __copy_assign_alloc(const __tree& __t) + {__copy_assign_alloc(__t, integral_constant());} + + _LIBCUDACXX_INLINE_VISIBILITY + void __copy_assign_alloc(const __tree& __t, true_type) + { + if (__node_alloc() != __t.__node_alloc()) + clear(); + __node_alloc() = __t.__node_alloc(); + } + _LIBCUDACXX_INLINE_VISIBILITY + void __copy_assign_alloc(const __tree&, false_type) {} + + void __move_assign(__tree& __t, false_type); + void __move_assign(__tree& __t, true_type) + _NOEXCEPT_(is_nothrow_move_assignable::value && + is_nothrow_move_assignable<__node_allocator>::value); + + _LIBCUDACXX_INLINE_VISIBILITY + void __move_assign_alloc(__tree& __t) + _NOEXCEPT_( + !__node_traits::propagate_on_container_move_assignment::value || + is_nothrow_move_assignable<__node_allocator>::value) + {__move_assign_alloc(__t, integral_constant());} + + _LIBCUDACXX_INLINE_VISIBILITY + void __move_assign_alloc(__tree& __t, true_type) + _NOEXCEPT_(is_nothrow_move_assignable<__node_allocator>::value) + {__node_alloc() = _CUDA_VSTD::move(__t.__node_alloc());} + _LIBCUDACXX_INLINE_VISIBILITY + void __move_assign_alloc(__tree&, false_type) _NOEXCEPT {} + + struct _DetachedTreeCache { + _LIBCUDACXX_INLINE_VISIBILITY + explicit _DetachedTreeCache(__tree *__t) _NOEXCEPT : __t_(__t), + __cache_root_(__detach_from_tree(__t)) { + __advance(); + } + + _LIBCUDACXX_INLINE_VISIBILITY + __node_pointer __get() const _NOEXCEPT { + return __cache_elem_; + } + + _LIBCUDACXX_INLINE_VISIBILITY + void __advance() _NOEXCEPT { + __cache_elem_ = __cache_root_; + if (__cache_root_) { + __cache_root_ = __detach_next(__cache_root_); + } + } + + _LIBCUDACXX_INLINE_VISIBILITY + ~_DetachedTreeCache() { + __t_->destroy(__cache_elem_); + if (__cache_root_) { + while (__cache_root_->__parent_ != nullptr) + __cache_root_ = static_cast<__node_pointer>(__cache_root_->__parent_); + __t_->destroy(__cache_root_); + } + } + + _DetachedTreeCache(_DetachedTreeCache const&) = delete; + _DetachedTreeCache& operator=(_DetachedTreeCache const&) = delete; + + private: + _LIBCUDACXX_INLINE_VISIBILITY + static __node_pointer __detach_from_tree(__tree *__t) _NOEXCEPT; + _LIBCUDACXX_INLINE_VISIBILITY + static __node_pointer __detach_next(__node_pointer) _NOEXCEPT; + + __tree *__t_; + __node_pointer __cache_root_; + __node_pointer __cache_elem_; + }; + + + template friend class _LIBCUDACXX_TEMPLATE_VIS map; + template friend class _LIBCUDACXX_TEMPLATE_VIS multimap; +}; + +template +__tree<_Tp, _Compare, _Allocator>::__tree(const value_compare& __comp) + _NOEXCEPT_( + is_nothrow_default_constructible<__node_allocator>::value && + is_nothrow_copy_constructible::value) + : __pair3_(0, __comp) +{ + __begin_node() = __end_node(); +} + +template +__tree<_Tp, _Compare, _Allocator>::__tree(const allocator_type& __a) + : __begin_node_(__iter_pointer()), + 
__pair1_(__second_tag(), __node_allocator(__a)), + __pair3_(0) +{ + __begin_node() = __end_node(); +} + +template +__tree<_Tp, _Compare, _Allocator>::__tree(const value_compare& __comp, + const allocator_type& __a) + : __begin_node_(__iter_pointer()), + __pair1_(__second_tag(), __node_allocator(__a)), + __pair3_(0, __comp) +{ + __begin_node() = __end_node(); +} + +// Precondition: size() != 0 +template +typename __tree<_Tp, _Compare, _Allocator>::__node_pointer +__tree<_Tp, _Compare, _Allocator>::_DetachedTreeCache::__detach_from_tree(__tree *__t) _NOEXCEPT +{ + __node_pointer __cache = static_cast<__node_pointer>(__t->__begin_node()); + __t->__begin_node() = __t->__end_node(); + __t->__end_node()->__left_->__parent_ = nullptr; + __t->__end_node()->__left_ = nullptr; + __t->size() = 0; + // __cache->__left_ == nullptr + if (__cache->__right_ != nullptr) + __cache = static_cast<__node_pointer>(__cache->__right_); + // __cache->__left_ == nullptr + // __cache->__right_ == nullptr + return __cache; +} + +// Precondition: __cache != nullptr +// __cache->left_ == nullptr +// __cache->right_ == nullptr +// This is no longer a red-black tree +template +typename __tree<_Tp, _Compare, _Allocator>::__node_pointer +__tree<_Tp, _Compare, _Allocator>::_DetachedTreeCache::__detach_next(__node_pointer __cache) _NOEXCEPT +{ + if (__cache->__parent_ == nullptr) + return nullptr; + if (__tree_is_left_child(static_cast<__node_base_pointer>(__cache))) + { + __cache->__parent_->__left_ = nullptr; + __cache = static_cast<__node_pointer>(__cache->__parent_); + if (__cache->__right_ == nullptr) + return __cache; + return static_cast<__node_pointer>(__tree_leaf(__cache->__right_)); + } + // __cache is right child + __cache->__parent_unsafe()->__right_ = nullptr; + __cache = static_cast<__node_pointer>(__cache->__parent_); + if (__cache->__left_ == nullptr) + return __cache; + return static_cast<__node_pointer>(__tree_leaf(__cache->__left_)); +} + +template +__tree<_Tp, _Compare, _Allocator>& +__tree<_Tp, _Compare, _Allocator>::operator=(const __tree& __t) +{ + if (this != &__t) + { + value_comp() = __t.value_comp(); + __copy_assign_alloc(__t); + __assign_multi(__t.begin(), __t.end()); + } + return *this; +} + +template +template +void +__tree<_Tp, _Compare, _Allocator>::__assign_unique(_ForwardIterator __first, _ForwardIterator __last) +{ + typedef iterator_traits<_ForwardIterator> _ITraits; + typedef typename _ITraits::value_type _ItValueType; + static_assert((is_same<_ItValueType, __container_value_type>::value), + "__assign_unique may only be called with the containers value type"); + static_assert(__is_forward_iterator<_ForwardIterator>::value, + "__assign_unique requires a forward iterator"); + if (size() != 0) + { + _DetachedTreeCache __cache(this); + for (; __cache.__get() != nullptr && __first != __last; ++__first) { + if (__node_assign_unique(*__first, __cache.__get()).second) + __cache.__advance(); + } + } + for (; __first != __last; ++__first) + __insert_unique(*__first); +} + +template +template +void +__tree<_Tp, _Compare, _Allocator>::__assign_multi(_InputIterator __first, _InputIterator __last) +{ + typedef iterator_traits<_InputIterator> _ITraits; + typedef typename _ITraits::value_type _ItValueType; + static_assert((is_same<_ItValueType, __container_value_type>::value || + is_same<_ItValueType, __node_value_type>::value), + "__assign_multi may only be called with the containers value type" + " or the nodes value type"); + if (size() != 0) + { + _DetachedTreeCache __cache(this); + for (; 
__cache.__get() && __first != __last; ++__first) { + __cache.__get()->__value_ = *__first; + __node_insert_multi(__cache.__get()); + __cache.__advance(); + } + } + for (; __first != __last; ++__first) + __insert_multi(_NodeTypes::__get_value(*__first)); +} + +template +__tree<_Tp, _Compare, _Allocator>::__tree(const __tree& __t) + : __begin_node_(__iter_pointer()), + __pair1_(__second_tag(), __node_traits::select_on_container_copy_construction(__t.__node_alloc())), + __pair3_(0, __t.value_comp()) +{ + __begin_node() = __end_node(); +} + +#ifndef _LIBCUDACXX_CXX03_LANG + +template +__tree<_Tp, _Compare, _Allocator>::__tree(__tree&& __t) + _NOEXCEPT_( + is_nothrow_move_constructible<__node_allocator>::value && + is_nothrow_move_constructible::value) + : __begin_node_(_CUDA_VSTD::move(__t.__begin_node_)), + __pair1_(_CUDA_VSTD::move(__t.__pair1_)), + __pair3_(_CUDA_VSTD::move(__t.__pair3_)) +{ + if (size() == 0) + __begin_node() = __end_node(); + else + { + __end_node()->__left_->__parent_ = static_cast<__parent_pointer>(__end_node()); + __t.__begin_node() = __t.__end_node(); + __t.__end_node()->__left_ = nullptr; + __t.size() = 0; + } +} + +template +__tree<_Tp, _Compare, _Allocator>::__tree(__tree&& __t, const allocator_type& __a) + : __pair1_(__second_tag(), __node_allocator(__a)), + __pair3_(0, _CUDA_VSTD::move(__t.value_comp())) +{ + if (__a == __t.__alloc()) + { + if (__t.size() == 0) + __begin_node() = __end_node(); + else + { + __begin_node() = __t.__begin_node(); + __end_node()->__left_ = __t.__end_node()->__left_; + __end_node()->__left_->__parent_ = static_cast<__parent_pointer>(__end_node()); + size() = __t.size(); + __t.__begin_node() = __t.__end_node(); + __t.__end_node()->__left_ = nullptr; + __t.size() = 0; + } + } + else + { + __begin_node() = __end_node(); + } +} + +template +void +__tree<_Tp, _Compare, _Allocator>::__move_assign(__tree& __t, true_type) + _NOEXCEPT_(is_nothrow_move_assignable::value && + is_nothrow_move_assignable<__node_allocator>::value) +{ + destroy(static_cast<__node_pointer>(__end_node()->__left_)); + __begin_node_ = __t.__begin_node_; + __pair1_.first() = __t.__pair1_.first(); + __move_assign_alloc(__t); + __pair3_ = _CUDA_VSTD::move(__t.__pair3_); + if (size() == 0) + __begin_node() = __end_node(); + else + { + __end_node()->__left_->__parent_ = static_cast<__parent_pointer>(__end_node()); + __t.__begin_node() = __t.__end_node(); + __t.__end_node()->__left_ = nullptr; + __t.size() = 0; + } +} + +template +void +__tree<_Tp, _Compare, _Allocator>::__move_assign(__tree& __t, false_type) +{ + if (__node_alloc() == __t.__node_alloc()) + __move_assign(__t, true_type()); + else + { + value_comp() = _CUDA_VSTD::move(__t.value_comp()); + const_iterator __e = end(); + if (size() != 0) + { + _DetachedTreeCache __cache(this); + while (__cache.__get() != nullptr && __t.size() != 0) { + __cache.__get()->__value_ = _CUDA_VSTD::move(__t.remove(__t.begin())->__value_); + __node_insert_multi(__cache.__get()); + __cache.__advance(); + } + } + while (__t.size() != 0) + __insert_multi(__e, _NodeTypes::__move(__t.remove(__t.begin())->__value_)); + } +} + +template +__tree<_Tp, _Compare, _Allocator>& +__tree<_Tp, _Compare, _Allocator>::operator=(__tree&& __t) + _NOEXCEPT_( + __node_traits::propagate_on_container_move_assignment::value && + is_nothrow_move_assignable::value && + is_nothrow_move_assignable<__node_allocator>::value) + +{ + __move_assign(__t, integral_constant()); + return *this; +} + +#endif // _LIBCUDACXX_CXX03_LANG + +template +__tree<_Tp, _Compare, 
_Allocator>::~__tree() +{ + static_assert((is_copy_constructible::value), + "Comparator must be copy-constructible."); + destroy(__root()); +} + +template +void +__tree<_Tp, _Compare, _Allocator>::destroy(__node_pointer __nd) _NOEXCEPT +{ + if (__nd != nullptr) + { + destroy(static_cast<__node_pointer>(__nd->__left_)); + destroy(static_cast<__node_pointer>(__nd->__right_)); + __node_allocator& __na = __node_alloc(); + __node_traits::destroy(__na, _NodeTypes::__get_ptr(__nd->__value_)); + __node_traits::deallocate(__na, __nd, 1); + } +} + +template +void +__tree<_Tp, _Compare, _Allocator>::swap(__tree& __t) +#if _LIBCUDACXX_STD_VER <= 11 + _NOEXCEPT_( + __is_nothrow_swappable::value + && (!__node_traits::propagate_on_container_swap::value || + __is_nothrow_swappable<__node_allocator>::value) + ) +#else + _NOEXCEPT_(__is_nothrow_swappable::value) +#endif +{ + using _CUDA_VSTD::swap; + swap(__begin_node_, __t.__begin_node_); + swap(__pair1_.first(), __t.__pair1_.first()); + __swap_allocator(__node_alloc(), __t.__node_alloc()); + __pair3_.swap(__t.__pair3_); + if (size() == 0) + __begin_node() = __end_node(); + else + __end_node()->__left_->__parent_ = static_cast<__parent_pointer>(__end_node()); + if (__t.size() == 0) + __t.__begin_node() = __t.__end_node(); + else + __t.__end_node()->__left_->__parent_ = static_cast<__parent_pointer>(__t.__end_node()); +} + +template +void +__tree<_Tp, _Compare, _Allocator>::clear() _NOEXCEPT +{ + destroy(__root()); + size() = 0; + __begin_node() = __end_node(); + __end_node()->__left_ = nullptr; +} + +// Find lower_bound place to insert +// Set __parent to parent of null leaf +// Return reference to null leaf +template +typename __tree<_Tp, _Compare, _Allocator>::__node_base_pointer& +__tree<_Tp, _Compare, _Allocator>::__find_leaf_low(__parent_pointer& __parent, + const key_type& __v) +{ + __node_pointer __nd = __root(); + if (__nd != nullptr) + { + while (true) + { + if (value_comp()(__nd->__value_, __v)) + { + if (__nd->__right_ != nullptr) + __nd = static_cast<__node_pointer>(__nd->__right_); + else + { + __parent = static_cast<__parent_pointer>(__nd); + return __nd->__right_; + } + } + else + { + if (__nd->__left_ != nullptr) + __nd = static_cast<__node_pointer>(__nd->__left_); + else + { + __parent = static_cast<__parent_pointer>(__nd); + return __parent->__left_; + } + } + } + } + __parent = static_cast<__parent_pointer>(__end_node()); + return __parent->__left_; +} + +// Find upper_bound place to insert +// Set __parent to parent of null leaf +// Return reference to null leaf +template +typename __tree<_Tp, _Compare, _Allocator>::__node_base_pointer& +__tree<_Tp, _Compare, _Allocator>::__find_leaf_high(__parent_pointer& __parent, + const key_type& __v) +{ + __node_pointer __nd = __root(); + if (__nd != nullptr) + { + while (true) + { + if (value_comp()(__v, __nd->__value_)) + { + if (__nd->__left_ != nullptr) + __nd = static_cast<__node_pointer>(__nd->__left_); + else + { + __parent = static_cast<__parent_pointer>(__nd); + return __parent->__left_; + } + } + else + { + if (__nd->__right_ != nullptr) + __nd = static_cast<__node_pointer>(__nd->__right_); + else + { + __parent = static_cast<__parent_pointer>(__nd); + return __nd->__right_; + } + } + } + } + __parent = static_cast<__parent_pointer>(__end_node()); + return __parent->__left_; +} + +// Find leaf place to insert closest to __hint +// First check prior to __hint. +// Next check after __hint. +// Next do O(log N) search. 
+// Set __parent to parent of null leaf +// Return reference to null leaf +template +typename __tree<_Tp, _Compare, _Allocator>::__node_base_pointer& +__tree<_Tp, _Compare, _Allocator>::__find_leaf(const_iterator __hint, + __parent_pointer& __parent, + const key_type& __v) +{ + if (__hint == end() || !value_comp()(*__hint, __v)) // check before + { + // __v <= *__hint + const_iterator __prior = __hint; + if (__prior == begin() || !value_comp()(__v, *--__prior)) + { + // *prev(__hint) <= __v <= *__hint + if (__hint.__ptr_->__left_ == nullptr) + { + __parent = static_cast<__parent_pointer>(__hint.__ptr_); + return __parent->__left_; + } + else + { + __parent = static_cast<__parent_pointer>(__prior.__ptr_); + return static_cast<__node_base_pointer>(__prior.__ptr_)->__right_; + } + } + // __v < *prev(__hint) + return __find_leaf_high(__parent, __v); + } + // else __v > *__hint + return __find_leaf_low(__parent, __v); +} + +// Find place to insert if __v doesn't exist +// Set __parent to parent of null leaf +// Return reference to null leaf +// If __v exists, set parent to node of __v and return reference to node of __v +template +template +typename __tree<_Tp, _Compare, _Allocator>::__node_base_pointer& +__tree<_Tp, _Compare, _Allocator>::__find_equal(__parent_pointer& __parent, + const _Key& __v) +{ + __node_pointer __nd = __root(); + __node_base_pointer* __nd_ptr = __root_ptr(); + if (__nd != nullptr) + { + while (true) + { + if (value_comp()(__v, __nd->__value_)) + { + if (__nd->__left_ != nullptr) { + __nd_ptr = _CUDA_VSTD::addressof(__nd->__left_); + __nd = static_cast<__node_pointer>(__nd->__left_); + } else { + __parent = static_cast<__parent_pointer>(__nd); + return __parent->__left_; + } + } + else if (value_comp()(__nd->__value_, __v)) + { + if (__nd->__right_ != nullptr) { + __nd_ptr = _CUDA_VSTD::addressof(__nd->__right_); + __nd = static_cast<__node_pointer>(__nd->__right_); + } else { + __parent = static_cast<__parent_pointer>(__nd); + return __nd->__right_; + } + } + else + { + __parent = static_cast<__parent_pointer>(__nd); + return *__nd_ptr; + } + } + } + __parent = static_cast<__parent_pointer>(__end_node()); + return __parent->__left_; +} + +// Find place to insert if __v doesn't exist +// First check prior to __hint. +// Next check after __hint. +// Next do O(log N) search. 
+// Set __parent to parent of null leaf +// Return reference to null leaf +// If __v exists, set parent to node of __v and return reference to node of __v +template +template +typename __tree<_Tp, _Compare, _Allocator>::__node_base_pointer& +__tree<_Tp, _Compare, _Allocator>::__find_equal(const_iterator __hint, + __parent_pointer& __parent, + __node_base_pointer& __dummy, + const _Key& __v) +{ + if (__hint == end() || value_comp()(__v, *__hint)) // check before + { + // __v < *__hint + const_iterator __prior = __hint; + if (__prior == begin() || value_comp()(*--__prior, __v)) + { + // *prev(__hint) < __v < *__hint + if (__hint.__ptr_->__left_ == nullptr) + { + __parent = static_cast<__parent_pointer>(__hint.__ptr_); + return __parent->__left_; + } + else + { + __parent = static_cast<__parent_pointer>(__prior.__ptr_); + return static_cast<__node_base_pointer>(__prior.__ptr_)->__right_; + } + } + // __v <= *prev(__hint) + return __find_equal(__parent, __v); + } + else if (value_comp()(*__hint, __v)) // check after + { + // *__hint < __v + const_iterator __next = _CUDA_VSTD::next(__hint); + if (__next == end() || value_comp()(__v, *__next)) + { + // *__hint < __v < *_CUDA_VSTD::next(__hint) + if (__hint.__get_np()->__right_ == nullptr) + { + __parent = static_cast<__parent_pointer>(__hint.__ptr_); + return static_cast<__node_base_pointer>(__hint.__ptr_)->__right_; + } + else + { + __parent = static_cast<__parent_pointer>(__next.__ptr_); + return __parent->__left_; + } + } + // *next(__hint) <= __v + return __find_equal(__parent, __v); + } + // else __v == *__hint + __parent = static_cast<__parent_pointer>(__hint.__ptr_); + __dummy = static_cast<__node_base_pointer>(__hint.__ptr_); + return __dummy; +} + +template +void __tree<_Tp, _Compare, _Allocator>::__insert_node_at( + __parent_pointer __parent, __node_base_pointer& __child, + __node_base_pointer __new_node) _NOEXCEPT +{ + __new_node->__left_ = nullptr; + __new_node->__right_ = nullptr; + __new_node->__parent_ = __parent; + // __new_node->__is_black_ is initialized in __tree_balance_after_insert + __child = __new_node; + if (__begin_node()->__left_ != nullptr) + __begin_node() = static_cast<__iter_pointer>(__begin_node()->__left_); + __tree_balance_after_insert(__end_node()->__left_, __child); + ++size(); +} + +#ifndef _LIBCUDACXX_CXX03_LANG +template +template +pair::iterator, bool> +__tree<_Tp, _Compare, _Allocator>::__emplace_unique_key_args(_Key const& __k, _Args&&... __args) +#else +template +template +pair::iterator, bool> +__tree<_Tp, _Compare, _Allocator>::__emplace_unique_key_args(_Key const& __k, _Args& __args) +#endif +{ + __parent_pointer __parent; + __node_base_pointer& __child = __find_equal(__parent, __k); + __node_pointer __r = static_cast<__node_pointer>(__child); + bool __inserted = false; + if (__child == nullptr) + { +#ifndef _LIBCUDACXX_CXX03_LANG + __node_holder __h = __construct_node(_CUDA_VSTD::forward<_Args>(__args)...); +#else + __node_holder __h = __construct_node(__args); +#endif + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__h.get())); + __r = __h.release(); + __inserted = true; + } + return pair(iterator(__r), __inserted); +} + + +#ifndef _LIBCUDACXX_CXX03_LANG +template +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__emplace_hint_unique_key_args( + const_iterator __p, _Key const& __k, _Args&&... 
__args) +#else +template +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__emplace_hint_unique_key_args( + const_iterator __p, _Key const& __k, _Args& __args) +#endif +{ + __parent_pointer __parent; + __node_base_pointer __dummy; + __node_base_pointer& __child = __find_equal(__p, __parent, __dummy, __k); + __node_pointer __r = static_cast<__node_pointer>(__child); + if (__child == nullptr) + { +#ifndef _LIBCUDACXX_CXX03_LANG + __node_holder __h = __construct_node(_CUDA_VSTD::forward<_Args>(__args)...); +#else + __node_holder __h = __construct_node(__args); +#endif + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__h.get())); + __r = __h.release(); + } + return iterator(__r); +} + + +#ifndef _LIBCUDACXX_CXX03_LANG + +template +template +typename __tree<_Tp, _Compare, _Allocator>::__node_holder +__tree<_Tp, _Compare, _Allocator>::__construct_node(_Args&& ...__args) +{ + static_assert(!__is_tree_value_type<_Args...>::value, + "Cannot construct from __value_type"); + __node_allocator& __na = __node_alloc(); + __node_holder __h(__node_traits::allocate(__na, 1), _Dp(__na)); + __node_traits::construct(__na, _NodeTypes::__get_ptr(__h->__value_), _CUDA_VSTD::forward<_Args>(__args)...); + __h.get_deleter().__value_constructed = true; + return __h; +} + + +template +template +pair::iterator, bool> +__tree<_Tp, _Compare, _Allocator>::__emplace_unique_impl(_Args&&... __args) +{ + __node_holder __h = __construct_node(_CUDA_VSTD::forward<_Args>(__args)...); + __parent_pointer __parent; + __node_base_pointer& __child = __find_equal(__parent, __h->__value_); + __node_pointer __r = static_cast<__node_pointer>(__child); + bool __inserted = false; + if (__child == nullptr) + { + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__h.get())); + __r = __h.release(); + __inserted = true; + } + return pair(iterator(__r), __inserted); +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__emplace_hint_unique_impl(const_iterator __p, _Args&&... __args) +{ + __node_holder __h = __construct_node(_CUDA_VSTD::forward<_Args>(__args)...); + __parent_pointer __parent; + __node_base_pointer __dummy; + __node_base_pointer& __child = __find_equal(__p, __parent, __dummy, __h->__value_); + __node_pointer __r = static_cast<__node_pointer>(__child); + if (__child == nullptr) + { + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__h.get())); + __r = __h.release(); + } + return iterator(__r); +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__emplace_multi(_Args&&... __args) +{ + __node_holder __h = __construct_node(_CUDA_VSTD::forward<_Args>(__args)...); + __parent_pointer __parent; + __node_base_pointer& __child = __find_leaf_high(__parent, _NodeTypes::__get_key(__h->__value_)); + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__h.get())); + return iterator(static_cast<__node_pointer>(__h.release())); +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__emplace_hint_multi(const_iterator __p, + _Args&&... 
__args) +{ + __node_holder __h = __construct_node(_CUDA_VSTD::forward<_Args>(__args)...); + __parent_pointer __parent; + __node_base_pointer& __child = __find_leaf(__p, __parent, _NodeTypes::__get_key(__h->__value_)); + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__h.get())); + return iterator(static_cast<__node_pointer>(__h.release())); +} + + +#else // _LIBCUDACXX_CXX03_LANG + +template +typename __tree<_Tp, _Compare, _Allocator>::__node_holder +__tree<_Tp, _Compare, _Allocator>::__construct_node(const __container_value_type& __v) +{ + __node_allocator& __na = __node_alloc(); + __node_holder __h(__node_traits::allocate(__na, 1), _Dp(__na)); + __node_traits::construct(__na, _NodeTypes::__get_ptr(__h->__value_), __v); + __h.get_deleter().__value_constructed = true; + return _LIBCUDACXX_EXPLICIT_MOVE(__h); // explicitly moved for C++03 +} + +#endif // _LIBCUDACXX_CXX03_LANG + +#ifdef _LIBCUDACXX_CXX03_LANG +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__insert_multi(const __container_value_type& __v) +{ + __parent_pointer __parent; + __node_base_pointer& __child = __find_leaf_high(__parent, _NodeTypes::__get_key(__v)); + __node_holder __h = __construct_node(__v); + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__h.get())); + return iterator(__h.release()); +} + +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__insert_multi(const_iterator __p, const __container_value_type& __v) +{ + __parent_pointer __parent; + __node_base_pointer& __child = __find_leaf(__p, __parent, _NodeTypes::__get_key(__v)); + __node_holder __h = __construct_node(__v); + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__h.get())); + return iterator(__h.release()); +} +#endif + +template +pair::iterator, bool> +__tree<_Tp, _Compare, _Allocator>::__node_assign_unique(const __container_value_type& __v, __node_pointer __nd) +{ + __parent_pointer __parent; + __node_base_pointer& __child = __find_equal(__parent, _NodeTypes::__get_key(__v)); + __node_pointer __r = static_cast<__node_pointer>(__child); + bool __inserted = false; + if (__child == nullptr) + { + __nd->__value_ = __v; + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__nd)); + __r = __nd; + __inserted = true; + } + return pair(iterator(__r), __inserted); +} + + +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__node_insert_multi(__node_pointer __nd) +{ + __parent_pointer __parent; + __node_base_pointer& __child = __find_leaf_high(__parent, _NodeTypes::__get_key(__nd->__value_)); + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__nd)); + return iterator(__nd); +} + +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__node_insert_multi(const_iterator __p, + __node_pointer __nd) +{ + __parent_pointer __parent; + __node_base_pointer& __child = __find_leaf(__p, __parent, _NodeTypes::__get_key(__nd->__value_)); + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__nd)); + return iterator(__nd); +} + +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__remove_node_pointer(__node_pointer __ptr) _NOEXCEPT +{ + iterator __r(__ptr); + ++__r; + if (__begin_node() == __ptr) + __begin_node() = __r.__ptr_; + --size(); + __tree_remove(__end_node()->__left_, + static_cast<__node_base_pointer>(__ptr)); 
+ return __r; +} + +#if _LIBCUDACXX_STD_VER > 14 +template +template +_LIBCUDACXX_INLINE_VISIBILITY +_InsertReturnType +__tree<_Tp, _Compare, _Allocator>::__node_handle_insert_unique( + _NodeHandle&& __nh) +{ + if (__nh.empty()) + return _InsertReturnType{end(), false, _NodeHandle()}; + + __node_pointer __ptr = __nh.__ptr_; + __parent_pointer __parent; + __node_base_pointer& __child = __find_equal(__parent, + __ptr->__value_); + if (__child != nullptr) + return _InsertReturnType{ + iterator(static_cast<__node_pointer>(__child)), + false, _CUDA_VSTD::move(__nh)}; + + __insert_node_at(__parent, __child, + static_cast<__node_base_pointer>(__ptr)); + __nh.__release_ptr(); + return _InsertReturnType{iterator(__ptr), true, _NodeHandle()}; +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__node_handle_insert_unique( + const_iterator __hint, _NodeHandle&& __nh) +{ + if (__nh.empty()) + return end(); + + __node_pointer __ptr = __nh.__ptr_; + __parent_pointer __parent; + __node_base_pointer __dummy; + __node_base_pointer& __child = __find_equal(__hint, __parent, __dummy, + __ptr->__value_); + __node_pointer __r = static_cast<__node_pointer>(__child); + if (__child == nullptr) + { + __insert_node_at(__parent, __child, + static_cast<__node_base_pointer>(__ptr)); + __r = __ptr; + __nh.__release_ptr(); + } + return iterator(__r); +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +_NodeHandle +__tree<_Tp, _Compare, _Allocator>::__node_handle_extract(key_type const& __key) +{ + iterator __it = find(__key); + if (__it == end()) + return _NodeHandle(); + return __node_handle_extract<_NodeHandle>(__it); +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +_NodeHandle +__tree<_Tp, _Compare, _Allocator>::__node_handle_extract(const_iterator __p) +{ + __node_pointer __np = __p.__get_np(); + __remove_node_pointer(__np); + return _NodeHandle(__np, __alloc()); +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +void +__tree<_Tp, _Compare, _Allocator>::__node_handle_merge_unique(_Tree& __source) +{ + static_assert(is_same::value, ""); + + for (typename _Tree::iterator __i = __source.begin(); + __i != __source.end();) + { + __node_pointer __src_ptr = __i.__get_np(); + __parent_pointer __parent; + __node_base_pointer& __child = + __find_equal(__parent, _NodeTypes::__get_key(__src_ptr->__value_)); + ++__i; + if (__child != nullptr) + continue; + __source.__remove_node_pointer(__src_ptr); + __insert_node_at(__parent, __child, + static_cast<__node_base_pointer>(__src_ptr)); + } +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__node_handle_insert_multi(_NodeHandle&& __nh) +{ + if (__nh.empty()) + return end(); + __node_pointer __ptr = __nh.__ptr_; + __parent_pointer __parent; + __node_base_pointer& __child = __find_leaf_high( + __parent, _NodeTypes::__get_key(__ptr->__value_)); + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__ptr)); + __nh.__release_ptr(); + return iterator(__ptr); +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__node_handle_insert_multi( + const_iterator __hint, _NodeHandle&& __nh) +{ + if (__nh.empty()) + return end(); + + __node_pointer __ptr = __nh.__ptr_; + __parent_pointer __parent; + __node_base_pointer& __child = __find_leaf(__hint, __parent, + 
_NodeTypes::__get_key(__ptr->__value_)); + __insert_node_at(__parent, __child, static_cast<__node_base_pointer>(__ptr)); + __nh.__release_ptr(); + return iterator(__ptr); +} + +template +template +_LIBCUDACXX_INLINE_VISIBILITY +void +__tree<_Tp, _Compare, _Allocator>::__node_handle_merge_multi(_Tree& __source) +{ + static_assert(is_same::value, ""); + + for (typename _Tree::iterator __i = __source.begin(); + __i != __source.end();) + { + __node_pointer __src_ptr = __i.__get_np(); + __parent_pointer __parent; + __node_base_pointer& __child = __find_leaf_high( + __parent, _NodeTypes::__get_key(__src_ptr->__value_)); + ++__i; + __source.__remove_node_pointer(__src_ptr); + __insert_node_at(__parent, __child, + static_cast<__node_base_pointer>(__src_ptr)); + } +} + +#endif // _LIBCUDACXX_STD_VER > 14 + +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::erase(const_iterator __p) +{ + __node_pointer __np = __p.__get_np(); + iterator __r = __remove_node_pointer(__np); + __node_allocator& __na = __node_alloc(); + __node_traits::destroy(__na, _NodeTypes::__get_ptr( + const_cast<__node_value_type&>(*__p))); + __node_traits::deallocate(__na, __np, 1); + return __r; +} + +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::erase(const_iterator __f, const_iterator __l) +{ + while (__f != __l) + __f = erase(__f); + return iterator(__l.__ptr_); +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::size_type +__tree<_Tp, _Compare, _Allocator>::__erase_unique(const _Key& __k) +{ + iterator __i = find(__k); + if (__i == end()) + return 0; + erase(__i); + return 1; +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::size_type +__tree<_Tp, _Compare, _Allocator>::__erase_multi(const _Key& __k) +{ + pair __p = __equal_range_multi(__k); + size_type __r = 0; + for (; __p.first != __p.second; ++__r) + __p.first = erase(__p.first); + return __r; +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::find(const _Key& __v) +{ + iterator __p = __lower_bound(__v, __root(), __end_node()); + if (__p != end() && !value_comp()(__v, *__p)) + return __p; + return end(); +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::const_iterator +__tree<_Tp, _Compare, _Allocator>::find(const _Key& __v) const +{ + const_iterator __p = __lower_bound(__v, __root(), __end_node()); + if (__p != end() && !value_comp()(__v, *__p)) + return __p; + return end(); +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::size_type +__tree<_Tp, _Compare, _Allocator>::__count_unique(const _Key& __k) const +{ + __node_pointer __rt = __root(); + while (__rt != nullptr) + { + if (value_comp()(__k, __rt->__value_)) + { + __rt = static_cast<__node_pointer>(__rt->__left_); + } + else if (value_comp()(__rt->__value_, __k)) + __rt = static_cast<__node_pointer>(__rt->__right_); + else + return 1; + } + return 0; +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::size_type +__tree<_Tp, _Compare, _Allocator>::__count_multi(const _Key& __k) const +{ + __iter_pointer __result = __end_node(); + __node_pointer __rt = __root(); + while (__rt != nullptr) + { + if (value_comp()(__k, __rt->__value_)) + { + __result = static_cast<__iter_pointer>(__rt); + __rt = static_cast<__node_pointer>(__rt->__left_); + } + else if (value_comp()(__rt->__value_, __k)) + __rt = static_cast<__node_pointer>(__rt->__right_); + else + return 
_CUDA_VSTD::distance( + __lower_bound(__k, static_cast<__node_pointer>(__rt->__left_), static_cast<__iter_pointer>(__rt)), + __upper_bound(__k, static_cast<__node_pointer>(__rt->__right_), __result) + ); + } + return 0; +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__lower_bound(const _Key& __v, + __node_pointer __root, + __iter_pointer __result) +{ + while (__root != nullptr) + { + if (!value_comp()(__root->__value_, __v)) + { + __result = static_cast<__iter_pointer>(__root); + __root = static_cast<__node_pointer>(__root->__left_); + } + else + __root = static_cast<__node_pointer>(__root->__right_); + } + return iterator(__result); +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::const_iterator +__tree<_Tp, _Compare, _Allocator>::__lower_bound(const _Key& __v, + __node_pointer __root, + __iter_pointer __result) const +{ + while (__root != nullptr) + { + if (!value_comp()(__root->__value_, __v)) + { + __result = static_cast<__iter_pointer>(__root); + __root = static_cast<__node_pointer>(__root->__left_); + } + else + __root = static_cast<__node_pointer>(__root->__right_); + } + return const_iterator(__result); +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::iterator +__tree<_Tp, _Compare, _Allocator>::__upper_bound(const _Key& __v, + __node_pointer __root, + __iter_pointer __result) +{ + while (__root != nullptr) + { + if (value_comp()(__v, __root->__value_)) + { + __result = static_cast<__iter_pointer>(__root); + __root = static_cast<__node_pointer>(__root->__left_); + } + else + __root = static_cast<__node_pointer>(__root->__right_); + } + return iterator(__result); +} + +template +template +typename __tree<_Tp, _Compare, _Allocator>::const_iterator +__tree<_Tp, _Compare, _Allocator>::__upper_bound(const _Key& __v, + __node_pointer __root, + __iter_pointer __result) const +{ + while (__root != nullptr) + { + if (value_comp()(__v, __root->__value_)) + { + __result = static_cast<__iter_pointer>(__root); + __root = static_cast<__node_pointer>(__root->__left_); + } + else + __root = static_cast<__node_pointer>(__root->__right_); + } + return const_iterator(__result); +} + +template +template +pair::iterator, + typename __tree<_Tp, _Compare, _Allocator>::iterator> +__tree<_Tp, _Compare, _Allocator>::__equal_range_unique(const _Key& __k) +{ + typedef pair _Pp; + __iter_pointer __result = __end_node(); + __node_pointer __rt = __root(); + while (__rt != nullptr) + { + if (value_comp()(__k, __rt->__value_)) + { + __result = static_cast<__iter_pointer>(__rt); + __rt = static_cast<__node_pointer>(__rt->__left_); + } + else if (value_comp()(__rt->__value_, __k)) + __rt = static_cast<__node_pointer>(__rt->__right_); + else + return _Pp(iterator(__rt), + iterator( + __rt->__right_ != nullptr ? 
+ static_cast<__iter_pointer>(__tree_min(__rt->__right_)) + : __result)); + } + return _Pp(iterator(__result), iterator(__result)); +} + +template +template +pair::const_iterator, + typename __tree<_Tp, _Compare, _Allocator>::const_iterator> +__tree<_Tp, _Compare, _Allocator>::__equal_range_unique(const _Key& __k) const +{ + typedef pair _Pp; + __iter_pointer __result = __end_node(); + __node_pointer __rt = __root(); + while (__rt != nullptr) + { + if (value_comp()(__k, __rt->__value_)) + { + __result = static_cast<__iter_pointer>(__rt); + __rt = static_cast<__node_pointer>(__rt->__left_); + } + else if (value_comp()(__rt->__value_, __k)) + __rt = static_cast<__node_pointer>(__rt->__right_); + else + return _Pp(const_iterator(__rt), + const_iterator( + __rt->__right_ != nullptr ? + static_cast<__iter_pointer>(__tree_min(__rt->__right_)) + : __result)); + } + return _Pp(const_iterator(__result), const_iterator(__result)); +} + +template +template +pair::iterator, + typename __tree<_Tp, _Compare, _Allocator>::iterator> +__tree<_Tp, _Compare, _Allocator>::__equal_range_multi(const _Key& __k) +{ + typedef pair _Pp; + __iter_pointer __result = __end_node(); + __node_pointer __rt = __root(); + while (__rt != nullptr) + { + if (value_comp()(__k, __rt->__value_)) + { + __result = static_cast<__iter_pointer>(__rt); + __rt = static_cast<__node_pointer>(__rt->__left_); + } + else if (value_comp()(__rt->__value_, __k)) + __rt = static_cast<__node_pointer>(__rt->__right_); + else + return _Pp(__lower_bound(__k, static_cast<__node_pointer>(__rt->__left_), static_cast<__iter_pointer>(__rt)), + __upper_bound(__k, static_cast<__node_pointer>(__rt->__right_), __result)); + } + return _Pp(iterator(__result), iterator(__result)); +} + +template +template +pair::const_iterator, + typename __tree<_Tp, _Compare, _Allocator>::const_iterator> +__tree<_Tp, _Compare, _Allocator>::__equal_range_multi(const _Key& __k) const +{ + typedef pair _Pp; + __iter_pointer __result = __end_node(); + __node_pointer __rt = __root(); + while (__rt != nullptr) + { + if (value_comp()(__k, __rt->__value_)) + { + __result = static_cast<__iter_pointer>(__rt); + __rt = static_cast<__node_pointer>(__rt->__left_); + } + else if (value_comp()(__rt->__value_, __k)) + __rt = static_cast<__node_pointer>(__rt->__right_); + else + return _Pp(__lower_bound(__k, static_cast<__node_pointer>(__rt->__left_), static_cast<__iter_pointer>(__rt)), + __upper_bound(__k, static_cast<__node_pointer>(__rt->__right_), __result)); + } + return _Pp(const_iterator(__result), const_iterator(__result)); +} + +template +typename __tree<_Tp, _Compare, _Allocator>::__node_holder +__tree<_Tp, _Compare, _Allocator>::remove(const_iterator __p) _NOEXCEPT +{ + __node_pointer __np = __p.__get_np(); + if (__begin_node() == __p.__ptr_) + { + if (__np->__right_ != nullptr) + __begin_node() = static_cast<__iter_pointer>(__np->__right_); + else + __begin_node() = static_cast<__iter_pointer>(__np->__parent_); + } + --size(); + __tree_remove(__end_node()->__left_, + static_cast<__node_base_pointer>(__np)); + return __node_holder(__np, _Dp(__node_alloc(), true)); +} + +template +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(__tree<_Tp, _Compare, _Allocator>& __x, + __tree<_Tp, _Compare, _Allocator>& __y) + _NOEXCEPT_(_NOEXCEPT_(__x.swap(__y))) +{ + __x.swap(__y); +} + +_LIBCUDACXX_END_NAMESPACE_STD + +_LIBCUDACXX_POP_MACROS + +#endif // _LIBCUDACXX___TREE diff --git a/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__tuple 
b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__tuple new file mode 100644 index 000000000000..357e3156761e --- /dev/null +++ b/thirdparty/manifold/thirdparty/thrust/dependencies/libcudacxx/include/cuda/std/detail/libcxx/include/__tuple @@ -0,0 +1,567 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___TUPLE +#define _LIBCUDACXX___TUPLE + +#ifndef __cuda_std__ +#include <__config> +#include +#include +#include <__pragma_push> +#endif //__cuda_std__ + +#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER) +#pragma GCC system_header +#endif + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +template struct _LIBCUDACXX_TEMPLATE_VIS tuple_size; + +#if !defined(_LIBCUDACXX_CXX03_LANG) +template +using __enable_if_tuple_size_imp = _Tp; + +template +struct _LIBCUDACXX_TEMPLATE_VIS tuple_size<__enable_if_tuple_size_imp< + const _Tp, + typename enable_if::value>::type, + integral_constant)>>> + : public integral_constant::value> {}; + +template +struct _LIBCUDACXX_TEMPLATE_VIS tuple_size<__enable_if_tuple_size_imp< + volatile _Tp, + typename enable_if::value>::type, + integral_constant)>>> + : public integral_constant::value> {}; + +template +struct _LIBCUDACXX_TEMPLATE_VIS tuple_size<__enable_if_tuple_size_imp< + const volatile _Tp, + integral_constant)>>> + : public integral_constant::value> {}; + +#else +template struct _LIBCUDACXX_TEMPLATE_VIS tuple_size : public tuple_size<_Tp> {}; +template struct _LIBCUDACXX_TEMPLATE_VIS tuple_size : public tuple_size<_Tp> {}; +template struct _LIBCUDACXX_TEMPLATE_VIS tuple_size : public tuple_size<_Tp> {}; +#endif + +template struct _LIBCUDACXX_TEMPLATE_VIS tuple_element; + +template +struct _LIBCUDACXX_TEMPLATE_VIS tuple_element<_Ip, const _Tp> +{ + typedef _LIBCUDACXX_NODEBUG_TYPE typename add_const::type>::type type; +}; + +template +struct _LIBCUDACXX_TEMPLATE_VIS tuple_element<_Ip, volatile _Tp> +{ + typedef _LIBCUDACXX_NODEBUG_TYPE typename add_volatile::type>::type type; +}; + +template +struct _LIBCUDACXX_TEMPLATE_VIS tuple_element<_Ip, const volatile _Tp> +{ + typedef _LIBCUDACXX_NODEBUG_TYPE typename add_cv::type>::type type; +}; + +template struct __tuple_like : false_type {}; + +template struct __tuple_like : public __tuple_like<_Tp> {}; +template struct __tuple_like : public __tuple_like<_Tp> {}; +template struct __tuple_like : public __tuple_like<_Tp> {}; + +// tuple specializations + +#ifndef _LIBCUDACXX_CXX03_LANG + +template struct __tuple_indices {}; + +template +struct __integer_sequence { + template