Skip to content

Commit

Permalink
Fixing the wrong implementation of some math intrinsics (shader-slang#5491)
Browse files Browse the repository at this point in the history

* Fix the incorrect implementation of some math intrinsics

Closes issue shader-slang#5282.

The root cause of the issue is that log10 is not supported in WGSL,
so this change adds an implementation for it.

Also, ldexp in WGSL does not accept a float-typed exponent, so the
implementation of that intrinsic is fixed as well.

* Re-enable the previously disabled tests
  • Loading branch information
kaizhangNV authored Nov 5, 2024
1 parent 7c2ff54 commit 53dd592
Show file tree
Hide file tree
Showing 4 changed files with 10 additions and 8 deletions.
12 changes: 8 additions & 4 deletions source/slang/hlsl.meta.slang
Original file line number Diff line number Diff line change
Expand Up @@ -9498,7 +9498,8 @@ T ldexp(T x, T exp)
__target_switch
{
case hlsl: __intrinsic_asm "ldexp";
case wgsl: __intrinsic_asm "ldexp";
// In WGSL spec, ldexp can only take integer as the exponent.
case wgsl: __intrinsic_asm "($0 * exp2($1))";
default:
return x * exp2(exp);
}
Expand All @@ -9512,7 +9513,8 @@ vector<T, N> ldexp(vector<T, N> x, vector<T, N> exp)
__target_switch
{
case hlsl: __intrinsic_asm "ldexp";
case wgsl: __intrinsic_asm "ldexp";
// In WGSL spec, ldexp can only take integer as the exponent.
case wgsl: __intrinsic_asm "($0 * exp2($1))";
default:
return x * exp2(exp);
}
Expand Down Expand Up @@ -9737,13 +9739,14 @@ matrix<T, N, M> log(matrix<T, N, M> x)
/// @category math
__generic<T : __BuiltinFloatingPointType>
[__readNone]
[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]
[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]
T log10(T x)
{
__target_switch
{
case hlsl: __intrinsic_asm "log10";
case metal: __intrinsic_asm "log10";
case wgsl: __intrinsic_asm "(log( $0 ) * $S0( 0.43429448190325182765112891891661) )";
case glsl: __intrinsic_asm "(log( $0 ) * $S0( 0.43429448190325182765112891891661) )";
case cuda: __intrinsic_asm "$P_log10($0)";
case cpp: __intrinsic_asm "$P_log10($0)";
Expand All @@ -9760,13 +9763,14 @@ T log10(T x)

__generic<T : __BuiltinFloatingPointType, let N : int>
[__readNone]
[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]
[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]
vector<T,N> log10(vector<T,N> x)
{
__target_switch
{
case hlsl: __intrinsic_asm "log10";
case metal: __intrinsic_asm "log10";
case wgsl: __intrinsic_asm "(log( $0 ) * $S0(0.43429448190325182765112891891661) )";
case glsl: __intrinsic_asm "(log( $0 ) * $S0(0.43429448190325182765112891891661) )";
case spirv:
{
Expand Down
2 changes: 0 additions & 2 deletions tests/expected-failure-github.txt
Original file line number Diff line number Diff line change
Expand Up @@ -69,8 +69,6 @@ tests/hlsl-intrinsic/classify-float.slang.5 syn (wgpu)
tests/hlsl-intrinsic/matrix-float.slang.6 syn (wgpu)
tests/hlsl-intrinsic/matrix-int.slang.6 syn (wgpu)
tests/hlsl-intrinsic/scalar-double-simple.slang.7 syn (wgpu)
tests/hlsl-intrinsic/scalar-float.slang.5 syn (wgpu)
tests/hlsl-intrinsic/vector-float.slang.5 syn (wgpu)
tests/ir/string-literal-hash.slang.2 syn (wgpu)
tests/language-feature/anonymous-struct.slang.1 syn (wgpu)
tests/language-feature/constants/constexpr-loop.slang.2 syn (wgpu)
Expand Down
2 changes: 1 addition & 1 deletion tests/hlsl-intrinsic/scalar-float.slang
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
//TEST(compute, vulkan):COMPARE_COMPUTE_EX:-vk -compute -shaderobj
//DISABLED_TEST(compute, vulkan):COMPARE_COMPUTE_EX:-vk -compute -shaderobj
//TEST(compute, vulkan):COMPARE_COMPUTE_EX:-cuda -compute -shaderobj
//DISABLE_TEST(compute):COMPARE_COMPUTE_EX:-wgpu
//TEST(compute):COMPARE_COMPUTE_EX:-wgpu -compute -shaderobj

//TEST_INPUT:ubuffer(data=[0 0 0 0], stride=4):out,name outputBuffer
RWStructuredBuffer<int> outputBuffer;
Expand Down
2 changes: 1 addition & 1 deletion tests/hlsl-intrinsic/vector-float.slang
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
//TEST(compute):COMPARE_COMPUTE_EX:-slang -compute -dx12 -use-dxil -output-using-type -shaderobj
//TEST(compute, vulkan):COMPARE_COMPUTE_EX:-vk -compute -output-using-type -shaderobj
//TEST(compute, vulkan):COMPARE_COMPUTE_EX:-cuda -compute -output-using-type -shaderobj
//DISABLE_TEST(compute):COMPARE_COMPUTE_EX:-wgpu
//TEST(compute):COMPARE_COMPUTE_EX:-wgpu -compute -output-using-type -shaderobj

//TEST_INPUT:ubuffer(data=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0], stride=4):out,name outputBuffer
RWStructuredBuffer<float4> outputBuffer;
Expand Down

0 comments on commit 53dd592

Please sign in to comment.