") # use 80% of viewport
- print_comparison_table(io, true)
- println(io, "
")
- println(io, "```")
+comparison_tables_file = joinpath("docs", "src", "comparison_tables.md")
+preamble_file = joinpath("docs", "src", "comparison_tables_preamble.md")
+cp(preamble_file, comparison_tables_file; force = true)
+open(comparison_tables_file, "a") do io
+ for (title, comparison_table_dict) in comparison_table_dicts
+ print_comparison_table(title, comparison_table_dict, io)
+ end
end
makedocs(;
sitename = "UnrolledUtilities.jl",
modules = [UnrolledUtilities],
- pages = ["Home" => "index.md", "Comparison Table" => "comparison_table.md"],
+ pages = [
+ "Home" => "index.md",
+ "Introduction" => "introduction.md",
+ "User Guide" => "user_guide.md",
+ "Developer Guide" => "developer_guide.md",
+ "Comparison Tables" => basename(comparison_tables_file),
+ ],
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true",
- size_threshold_ignore = ["comparison_table.md"],
+ sidebar_sitename = false,
+ size_threshold_ignore = [basename(comparison_tables_file)],
),
clean = true,
)
-rm(comparison_table_file)
+rm(comparison_tables_file)
deploydocs(
repo = "github.com/CliMA/UnrolledUtilities.jl.git",
diff --git a/logo-white.svg b/docs/src/assets/logo-dark.svg
similarity index 99%
rename from logo-white.svg
rename to docs/src/assets/logo-dark.svg
index 2daf34e..c8c43ac 100644
--- a/logo-white.svg
+++ b/docs/src/assets/logo-dark.svg
@@ -7,7 +7,7 @@
width="1567.9242"
height="279.37802"
viewBox="0 0 1567.9242 279.37802"
- sodipodi:docname="logo-white.svg"
+ sodipodi:docname="logo-dark.svg"
inkscape:version="1.2.2 (b0a8486541, 2022-12-01)"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
diff --git a/docs/src/assets/logo.svg b/docs/src/assets/logo.svg
new file mode 100644
index 0000000..f02f78e
--- /dev/null
+++ b/docs/src/assets/logo.svg
@@ -0,0 +1,131 @@
+
+
+
+
diff --git a/docs/src/comparison_tables_preamble.md b/docs/src/comparison_tables_preamble.md
new file mode 100644
index 0000000..0170238
--- /dev/null
+++ b/docs/src/comparison_tables_preamble.md
@@ -0,0 +1,60 @@
+The following autogenerated tables contain a representative set of potential use
+cases for this package, along with measurements that summarize each case's
+performance, compilation, and memory usage:
+- run time (best of several trial measurements)
+- compilation time (as reported by the compiler)
+- overall level of optimization (type stability, constant propagation, etc.) and
+ allocations during run time (as reported by the garbage collector)
+- total allocations during compilation and first run (as reported by the garbage
+ collector and, when possible, the Julia process's resident set size estimator)
+
+The rows of the tables are highlighted as follows:
+- ```@raw html
+ light blue
+ ```
+ indicates an improvement in performance due to better optimization and either
+ an improvement or no change in compilation time and total allocations
+- ```@raw html
+ dark blue
+ ```
+ indicates an improvement in performance due to better optimization and either
+ slower compilation or more total allocations
+- ```@raw html
+ green
+ ```
+ indicates either faster run time or fewer allocations during run time and
+ either an improvement or no change in compilation time and total allocations
+- ```@raw html
+ yellow
+ ```
+ indicates either faster run time or fewer allocations during run time and
+ either slower compilation or more total allocations
+- ```@raw html
+ magenta
+ ```
+ indicates no change in performance and either an improvement or no change in
+ compilation time and total allocations
+- ```@raw html
+ light gray
+ ```
+ indicates no change in performance and either faster compilation with more
+ total allocations or slower compilation with fewer total allocations
+- ```@raw html
+ dark gray
+ ```
+ indicates no change in performance and no change in compilation time and total
+ allocations
+- ```@raw html
+ red
+ ```
+ indicates a deterioration in performance, or no change in performance and
+ either slower compilation or more total allocations
+
+Rows highlighted in gray present no clear advantage to unrolling, while those
+highlighted in red present a clear disadvantage. It is recommended that you only
+unroll when your use case is similar to a row in one of the remaining
+categories, each of which demonstrates some advantage to unrolling.
+
+The tables are also printed out by this package's test suite, so they can be
+compared across different operating systems by consulting the
+[CI pipeline](https://github.com/CliMA/UnrolledUtilities.jl/actions/workflows/ci.yml).
diff --git a/docs/src/developer_guide.md b/docs/src/developer_guide.md
new file mode 100644
index 0000000..a233d7b
--- /dev/null
+++ b/docs/src/developer_guide.md
@@ -0,0 +1,72 @@
+```@meta
+CurrentModule = UnrolledUtilities
+```
+
+## Unrollable Iterator Interface
+
+The functions exported by this package can be used with any statically sized
+iterators, as long as those iterators are compatible with the following interface:
+
+```@docs
+generic_getindex
+output_type_for_promotion
+AmbiguousOutputType
+NoOutputType
+ConditionalOutputType
+output_promote_rule
+constructor_from_tuple
+```
+
+## Extending the Interface
+
+To unroll over a statically sized iterator of some user-defined type `T`, follow
+these steps:
+- Add a method for either `getindex(::T, n)` or `generic_getindex(::T, n)`
+- If every unrolled function that needs to construct an iterator when given an
+ iterator of type `T` can return a `Tuple` instead, stop here; otherwise, to
+ return a non-Tuple iterator whenever possible, follow these steps:
+ - Add a method for `output_type_for_promotion(::T) = O`, where `O` can be
+ `T`, a supertype of `T`, or some other `Type` or `AmbiguousOutputType`
+ - If an output of type `O` can be used together with an output of type `O′`,
+ add a method for `output_promote_rule(O, O′)`
+ - If an output of type `O` can be efficiently constructed from a `Tuple`,
+ add a method for `constructor_from_tuple(O)`; otherwise, add a method for
+ each unrolled function that can efficiently generate an output of type `O`
+ without temporarily storing it as a `Tuple`
+
+## Internals
+
+There are two general strategies for unrolling in Julia -- recursively splatting
+iterator contents or manually generating unrolled expressions. For example, a
+recursively unrolled version of `foreach` is
+
+```julia
+unrolled_foreach(f, itr) = _unrolled_foreach(f, itr...)
+_unrolled_foreach(f) = nothing
+_unrolled_foreach(f, item, items...) = (f(item); _unrolled_foreach(f, items...))
+```
+
+On the other hand, a generatively unrolled version of `foreach` is
+
+```julia
+unrolled_foreach(f, itr) = _unrolled_foreach(Val(length(itr)), f, itr)
+@generated _unrolled_foreach(::Val{N}, f, itr) where {N} =
+ Expr(:block, (:(f(itr[$n])) for n in 1:N)..., nothing)
+```
+
+In order to switch between recursive and generative unrolling, this package
+defines the following function:
+
+```@docs
+rec_unroll
+```
+
+The cutoff iterator length of 16 was chosen based on the benchmarks for
+[Generative vs. Recursive Unrolling](@ref).
+
+!!! tip "Tip"
+ Recursive unrolling can be disabled globally by redefining this function:
+
+ ```julia
+ rec_unroll(itr) = false
+ ```
diff --git a/docs/src/index.md b/docs/src/index.md
index 8aaec38..dfc2144 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -1,75 +1,18 @@
# UnrolledUtilities.jl
-A collection of generated functions in which all loops are unrolled and inlined:
-- `unrolled_any(f, itr)`: similar to `any`
-- `unrolled_all(f, itr)`: similar to `all`
-- `unrolled_foreach(f, itrs...)`: similar to `foreach`
-- `unrolled_map(f, itrs...)`: similar to `map`
-- `unrolled_reduce(op, itr; [init])`: similar to `reduce`
-- `unrolled_mapreduce(f, op, itrs...; [init])`: similar to `mapreduce`
-- `unrolled_zip(itrs...)`: similar to `zip`
-- `unrolled_enumerate(itrs...)`: similar to `enumerate`, but with the ability to
- handle multiple iterators
-- `unrolled_in(item, itr)`: similar to `in`
-- `unrolled_unique(itr)`: similar to `unique`
-- `unrolled_filter(f, itr)`: similar to `filter`
-- `unrolled_split(f, itr)`: similar to `(filter(f, itr), filter(!f, itr))`, but
- without duplicate calls to `f`
-- `unrolled_flatten(itr)`: similar to `Iterators.flatten`
-- `unrolled_flatmap(f, itrs...)`: similar to `Iterators.flatmap`
-- `unrolled_product(itrs...)`: similar to `Iterators.product`
-- `unrolled_applyat(f, n, itrs...)`: similar to `f(map(itr -> itr[n], itrs)...)`
-- `unrolled_take(itr, ::Val{N})`: similar to `itr[1:N]` (and to
- `Iterators.take`), but with `N` wrapped in a `Val`
-- `unrolled_drop(itr, ::Val{N})`: similar to `itr[(N + 1):end]` (and to
- `Iterators.drop`), but with `N` wrapped in a `Val`
+A toolkit for low-level optimization of Julia code in which iterators have sizes
+that can be inferred during compilation.
-These functions are guaranteed to be type-stable whenever they are given
-iterators with inferrable lengths and element types, including when
-- the iterators have many elements (e.g., more than 32, which is when `map`,
- `reduce`, and `mapreduce` tend to stop getting compiled efficiently)
-- the iterators have nonuniform element types (most functions from `Base` and
- `Base.Iterators` tend to encounter type-instabilities and allocations when
- this is the case, especially when there are more than 32 elements)
-- `f` and/or `op` recursively call the function to which they are passed, up to
- an arbitrarily large recursion depth (e.g., if `f` calls `map(f, itrs)`, it
- will be type-unstable when the recursion depth exceeds 2, but this will not be
- the case with `unrolled_map`)
+This package can be used with all such "statically sized" iterators, including
+those with elements of different types or those that are extremely long, both of
+which are cases that Julia's standard library often handles inefficiently.
-In addition, these functions have been written in a way that makes them very
-likely to get fully optimized out through constant propagation when the
-iterators have singleton element types (and when the result of calling `f`
-and/or `op` on these elements is inferrable). However, they can also be much
-more expensive to compile than their counterparts from `Base` and
-`Base.Iterators`, in which case they should not be used unless there is a clear
-performance benefit. Some notable exceptions to this are `unrolled_zip`,
-`unrolled_take`, and `unrolled_drop`, which tend to be easier to compile than
-`zip`, `Iterators.take`, `Iterators.drop`, and standard indexing notation.
-
-For a more precise indication of whether you should use `UnrolledUtilities`,
-please consult the autogenerated [Comparison Table](@ref). This table contains a
-comprehensive set of potential use cases, each with a measurement of performance
-optimization, the time required for compilation, and the memory usage during
-compilation. Most cases involve simple functions `f` and/or `op`, but the last
-few demonstrate the benefits of unrolling with non-trivial recursive functions.
-
-The rows of the table are highlighted as follows:
-- green indicates an improvement in performance and either no change in
- compilation or easier compilation (i.e., either similar or smaller values of
- compilation time and memory usage)
-- dark blue indicates an improvement in performance and harder compilation
- (i.e., larger values of compilation time and/or memory usage)
-- light blue indicates no change in performance and easier compilation
-- yellow indicates no change in performance and no change in compilation
-- magenta indicates no change in performance, an increase in compilation time,
- and a decrease in compilation memory usage
-- red indicates no change in performance and harder compilation
-
-Rows highlighted in green and blue present a clear advantage for unrolling,
-whereas those highlighted in yellow, magenta, and red either have no clear
-advantage, or they have a clear disadvantage. It is recommended that you only
-unroll when your use case is similar to a row in the first category.
-
-The table is also printed out by this package's unit tests, so these
-measurements can be compared across different operating systems by checking the
-[CI pipeline](https://github.com/CliMA/UnrolledUtilities.jl/actions/workflows/ci.yml).
+Some of this package's potential benefits for functions that use statically
+sized iterators include:
+- performance improvements
+ - reduced run times
+ - reduced compilation times
+ - reduced memory footprints
+- better support for "static compilation"
+ - compilation of [executables](https://github.com/tshort/StaticCompiler.jl)
+ - compilation of [GPU kernels](https://github.com/JuliaGPU/CUDA.jl)
diff --git a/docs/src/introduction.md b/docs/src/introduction.md
new file mode 100644
index 0000000..d32a18a
--- /dev/null
+++ b/docs/src/introduction.md
@@ -0,0 +1,174 @@
+```@setup inference_test
+using UnrolledUtilities, InteractiveUtils, Test
+```
+
+## Motivation
+
+Although the iteration utilities in `Base` and `Base.Iterators` are sufficiently
+performant for most common use cases, those who choose to dive into the world of
+low-level optimization will often discover
+[type instabilities](https://docs.julialang.org/en/v1/manual/faq/#man-type-stability)
+in unexpected situations. Here is a particularly simple example:
+
+```@repl inference_test
+Test.@inferred map(one, Tuple(1:31));
+Test.@inferred map(one, Tuple(1:32));
+```
+
+This type instability is present in all `map`s over iterators with lengths
+greater than 31, regardless of whether they are statically sized. As with most
+type instabilities in Julia, this leads to memory allocations every time `map`
+is called with sufficiently long iterators.
+
+[`Test.@inferred`](https://docs.julialang.org/en/v1/stdlib/Test/#Test.@inferred)
+is helpful for checking whether the return type of a function call is stable,
+but looking directly at the generated [LLVM](https://llvm.org/docs/LangRef.html)
+code reveals just how different the two function calls above are:
+
+```@repl inference_test
+@code_llvm map(one, Tuple(1:31))
+@code_llvm map(one, Tuple(1:32))
+```
+
+The type instability (and all of the resulting LLVM code complexity) in the
+second function call can be eliminated by replacing `map` with `unrolled_map`:
+
+```@repl inference_test
+Test.@inferred unrolled_map(one, Tuple(1:32));
+@code_llvm unrolled_map(one, Tuple(1:32))
+```
+
+The minimum iterator length for type instability is not always 32; for instance,
+it can also be 14:
+
+```@repl inference_test
+first_11(itr) = itr[1:11]
+Test.@inferred first_11(Tuple(1:13));
+Test.@inferred first_11(Tuple(1:14));
+```
+
+!!! note "Note"
+ #### Why is the function definition needed in this example?
+
+ On the first line of the example above, `[1:11]` is enclosed in a function
+ so that it does not get evaluated in global scope. This turns the range
+ `1:11` into a `Core.Const`, which the compiler can propagate into the call
+ to `getindex` so that it can infer the length of the result:
+
+ ```@setup first_11_code_warntype
+ using InteractiveUtils
+ first_11(itr) = itr[1:11]
+ ```
+
+ ```@repl first_11_code_warntype
+ @code_warntype first_11(Tuple(1:13))
+ ```
+
+ In contrast, running `Test.@inferred Tuple(1:13)[1:11]` amounts to checking
+ whether the compiler can compute how long the result of `getindex` will be
+ given only the argument types `NTuple{13, Int64}` and `UnitRange{Int64}`,
+ which is not possible:
+
+ ```@repl inference_test
+ Test.@inferred Tuple(1:13)[1:11];
+ ```
+
+Although `itr[1:10]` is always inferrable when `itr` is a `Tuple`, `itr[1:11]`
+has a type instability whenever `itr` contains more than 13 items. More
+generally, `itr[1:N]` seems to be unstable for all `N > 10` whenever `itr`
+contains more than `N + 2` items. This type instability can be fixed by
+replacing `getindex` with `unrolled_take`:
+
+```@repl inference_test
+unrolled_first_11(itr) = unrolled_take(itr, Val(11))
+Test.@inferred unrolled_first_11(Tuple(1:14));
+```
+
+Even when the final result of a function is inferred, there can be intermediate
+steps in the function with type instabilities that trigger allocations:
+
+```@repl inference_test
+function add_lengths(itr)
+ length_sum = 0
+ for n in 1:length(itr)
+ length_sum += length(itr[n])
+ end
+end
+Test.@inferred add_lengths(((1, 2), (1, 2, 3)))
+@allocated add_lengths(((1, 2), (1, 2, 3)))
+@code_warntype add_lengths(((1, 2), (1, 2, 3)))
+```
+
+The output of `@code_warntype` is quite cluttered, but the most important detail
+here is that the call to `getindex` does not get inferred because it can result
+in either a `Tuple` of length 2 or a `Tuple` of length 3. This type instability
+can be fixed by replacing `getindex` with `unrolled_applyat`:
+
+```@repl inference_test
+function unrolled_add_lengths(itr)
+ length_sum = 0
+ for n in 1:length(itr)
+ length_sum += unrolled_applyat(length, n, itr)
+ end
+end
+unrolled_add_lengths(((1, 2), (1, 2, 3))) # hide
+@allocated unrolled_add_lengths(((1, 2), (1, 2, 3)))
+@code_warntype unrolled_add_lengths(((1, 2), (1, 2, 3)))
+```
+
+For a detailed breakdown of when the tools provided by this package can improve
+performance, see the [User Guide](@ref "When to Use Unrolled Functions").
+
+## Package Features
+
+This package exports a number of analogues to functions from `Base` and
+`Base.Iterators`, each of which has been optimized for statically sized
+iterators (in terms of both performance and compilation time):
+- `unrolled_any(f, itr)` -- similar to `any`
+- `unrolled_all(f, itr)` -- similar to `all`
+- `unrolled_foreach(f, itrs...)` -- similar to `foreach`
+- `unrolled_map(f, itrs...)` -- similar to `map`
+- `unrolled_reduce(op, itr; [init])` -- similar to `reduce`
+- `unrolled_mapreduce(f, op, itrs...; [init])` -- similar to `mapreduce`
+- `unrolled_accumulate(op, itr; [init], [transform])` -- similar to `accumulate`,
+ but with a `transform` function applied to every accumulated value
+- `unrolled_push(itr, item)` -- similar to `push!`, but non-mutating
+- `unrolled_append(itr1, itr2)` -- similar to `append!`, but non-mutating
+- `unrolled_take(itr, ::Val{N})` -- similar to `Iterators.take` (and to
+ `itr[1:N]`), but with `N` wrapped in a `Val`
+- `unrolled_drop(itr, ::Val{N})` -- similar to `Iterators.drop` (and to
+ `itr[(N + 1):end]`), but with `N` wrapped in a `Val`
+- `unrolled_in(item, itr)` -- similar to `in`
+- `unrolled_unique(itr)` -- similar to `unique`
+- `unrolled_filter(f, itr)` -- similar to `filter`
+- `unrolled_flatten(itr)` -- similar to `Iterators.flatten`
+- `unrolled_flatmap(f, itrs...)` -- similar to `Iterators.flatmap`
+- `unrolled_product(itrs...)` -- similar to `Iterators.product`
+
+In addition, this package exports two functions that do not have public
+analogues in `Base` or `Base.Iterators`:
+- `unrolled_applyat(f, n, itrs...)` -- similar to
+ `f(itrs[1][n], itrs[2][n], ...)`, but with `Core.Const` indices in all calls
+ to `getindex`
+- `unrolled_split(f, itr)` -- similar to `(filter(f, itr), filter(!f, itr))`,
+ but without duplicate calls to `f`
+
+These functions are compatible with the following types of iterators:
+- statically sized iterators from `Base` (`Tuple` and `NamedTuple`)
+- statically sized iterators from
+ [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl) (`SVector`
+ and `MVector`)
+- lazy iterators from `Base` (the results of `enumerate`, `zip`,
+ `Iterators.map`, and generator expressions), but only when they are used as
+ wrappers for statically sized iterators
+
+They are also compatible with two new types of statically sized iterators
+exported by this package:
+- `StaticOneTo` -- similar to `Base.OneTo`
+- `StaticBitVector` -- similar to `BitVector`
+
+For more information regarding these new iterators, see the
+[User Guide](@ref "When to Use StaticOneTo or StaticBitVector").
+
+For a description of how to add compatibility with other types of iterators, see
+the [Developer Guide](@ref "Extending the Interface").
diff --git a/docs/src/user_guide.md b/docs/src/user_guide.md
new file mode 100644
index 0000000..ce252d2
--- /dev/null
+++ b/docs/src/user_guide.md
@@ -0,0 +1,191 @@
+```@meta
+CurrentModule = UnrolledUtilities
+```
+
+```@setup inference_test
+using UnrolledUtilities, InteractiveUtils, Test
+```
+
+## When to Use Unrolled Functions
+
+The functions exported by this package tend to perform better than their
+counterparts from `Base` and `Base.Iterators` in the scenarios listed below.
+Additional examples and more precise measurements can be found in several
+automatically generated tables of benchmarks, to which links have been provided.
+
+### Iterators with nonuniform element types
+
+- `in` has an intermediate type instability that triggers allocations for
+ nonuniform iterators
+
+ ```@repl inference_test
+ @allocated () in ((1, 2), (1, 2, 3))
+ @allocated unrolled_in((), ((1, 2), (1, 2, 3)))
+ ```
+
+- `getindex` has an unstable return type for variable (not `Core.Const`) indices
+ and nonuniform iterators, which can lead to intermediate type instabilities
+ that trigger allocations
+
+ ```@repl inference_test
+ function add_lengths(itr)
+ length_sum = 0
+ for n in 1:length(itr)
+ length_sum += length(itr[n])
+ end
+ end
+ add_lengths(((1, 2), (1, 2, 3))) # hide
+ @allocated add_lengths(((1, 2), (1, 2, 3)))
+ function unrolled_add_lengths(itr)
+ length_sum = 0
+ for n in 1:length(itr)
+ length_sum += unrolled_applyat(length, n, itr)
+ end
+ end
+ unrolled_add_lengths(((1, 2), (1, 2, 3))) # hide
+ @allocated unrolled_add_lengths(((1, 2), (1, 2, 3)))
+ ```
+
+ !!! note "Note"
+ #### How is `unrolled_applyat` stable if the index is not a `Core.Const`?
+
+ For the example of `add_lengths`, the compiler must infer the return
+ type of `itr[::Int64]` before it can compile the call to `length`.
+ Since this return type depends on the index `n`, the compiler needs to
+ insert a runtime lookup into the method table that determines which
+ method of `length` to call, `length(::Tuple{Int64, Int64})` or
+ `length(::Tuple{Int64, Int64, Int64})`, and this triggers allocations.
+
+ For the example of `unrolled_add_lengths`, the compiler instead infers
+ the return types of `itr[::Core.Const(1)]`, `itr[::Core.Const(2)]`,
+ and so on for every index into `itr`. Then, it compiles a call to
+ `length` for each of these return types, and it inserts a runtime
+ [switch instruction](https://llvm.org/docs/LangRef.html#switch-instruction)
+ that determines which result of `length` to return for a particular
+ value of `n`. As long as `length` itself only returns one type (in this
+ case, `Int64`), this ensures that `unrolled_add_lengths` has no
+ intermediate type instabilities.
+
+ In other words, `unrolled_applyat` combines multiple methods for `length`
+ and `getindex` into a single method, replacing the inefficient method
+ table lookup that switches between them with a simpler switch instruction.
+
+ !!! tip "Tip"
+ #### When should `getindex` be replaced with `unrolled_applyat`?
+
+ The specific example above could be simplified by using `mapreduce`,
+ instead of using a `for`-loop in conjunction with `unrolled_applyat`:
+
+ ```@repl
+ @allocated mapreduce(length, +, ((1, 2), (1, 2, 3)))
+ ```
+
+ However, there are often situations in which it is not possible to
+ replace loops with function calls, like when those loops are parallelized
+ over CPU or GPU threads. Moreover, CUDA is unable to compile any kernels
+ with type instabilities that trigger allocations, so `unrolled_applyat` is
+ *required* in order to parallelize over nonuniform iterators on GPUs.
+
+- For more examples where this package improves performance with nonuniform
+ iterators, see the benchmarks for [Individual Unrolled Functions](@ref) and
+ [Nested Unrolled Functions](@ref)
+
+### Iterators with large lengths
+
+- `map` has an unstable return type for iterators with lengths greater than 32
+
+ ```@repl inference_test
+ Test.@inferred map(one, Tuple(1:31));
+ Test.@inferred map(one, Tuple(1:32));
+ Test.@inferred unrolled_map(one, Tuple(1:32));
+ ```
+
+- `getindex` has an unstable return type for `Core.Const` slices of length
+ `N > 10` from iterators with lengths greater than `N + 2`
+
+ ```@repl inference_test
+ first_11(itr) = itr[1:11]
+ Test.@inferred first_11(Tuple(1:13));
+ Test.@inferred first_11(Tuple(1:14));
+ unrolled_first_11(itr) = unrolled_take(itr, Val(11))
+ Test.@inferred unrolled_first_11(Tuple(1:14));
+ ```
+
+- For more examples where this package improves performance with long iterators,
+ see the benchmarks for [Individual Unrolled Functions](@ref)
+
+### Reduction operations with nonuniform return types
+
+- `reduce` has an unstable return type when the return type of `op` is not
+ constant, but only for iterator lengths greater than 32
+
+ ```@repl inference_test
+ Test.@inferred reduce(tuple, Tuple(1:32));
+ Test.@inferred reduce(tuple, Tuple(1:33));
+ Test.@inferred unrolled_reduce(tuple, Tuple(1:33));
+ ```
+
+- `accumulate` also has an unstable return type in this scenario
+ ```@repl inference_test
+ Test.@inferred accumulate(tuple, Tuple(1:32));
+ Test.@inferred accumulate(tuple, Tuple(1:33));
+ Test.@inferred unrolled_accumulate(tuple, Tuple(1:33));
+ ```
+
+- For more examples where this package improves performance with nonuniform
+ reductions, see the benchmarks for [Individual Unrolled Functions](@ref)
+
+### Operations with more than 2 levels of recursion
+
+- All functions in Julia have a default "recursion limit" of 2; unless this
+ limit is modified, it forces any function that recursively calls itself 2 or
+ more times to have an unstable return type, regardless of how the function
+ is written
+
+ ```@repl inference_test
+ recursive_length(itr) =
+ eltype(itr) <: Tuple ? mapreduce(recursive_length, +, itr) : length(itr)
+ Test.@inferred recursive_length(((1, 2), (1, 2, 3)));
+ Test.@inferred recursive_length((((1,), (2,)), (1, 2, 3)));
+ unrolled_recursive_length(itr) =
+ eltype(itr) <: Tuple ?
+ unrolled_mapreduce(unrolled_recursive_length, +, itr) : length(itr)
+ Test.@inferred unrolled_recursive_length((((1,), (2,)), (1, 2, 3)));
+ ```
+
+ !!! note "Note"
+ #### Is there any other way around the recursion limit?
+
+ The default recursion limit applies to all functions defined in `Base` and
+ `Base.Iterators`, so those functions will have unstable return types for
+ more than 2 levels of recursion, even when all user-defined functions
+ passed to them have had their recursion limits disabled. It is also
+ impossible to disable the recursion limits of functions defined in `Base`
+ from external packages. This means that the only way to avoid the
+ recursion limit is to not use these functions, and instead to define
+ alternatives without any recursion limits.
+
+- For more examples where this package improves performance with recursive
+ operations, see the benchmarks for [Recursive Unrolled Functions](@ref)
+
+## When to Use StaticOneTo or StaticBitVector
+
+### Constant iterators with very large lengths
+
+```@docs
+StaticOneTo
+```
+
+*TODO: Add an example and a relevant comparison table.*
+
+### Iterators with very large lengths that are modified across nested closures
+
+```@docs
+StaticBitVector
+```
+
+*TODO: Add an example.*
+
+For more examples where `StaticBitVector`s improve performance with very long
+iterators that are modified across nested closures, see the benchmarks for
+[Nested Unrolled Closures](@ref).
diff --git a/ext/UnrolledUtilitiesStaticArraysExt.jl b/ext/UnrolledUtilitiesStaticArraysExt.jl
new file mode 100644
index 0000000..67058a7
--- /dev/null
+++ b/ext/UnrolledUtilitiesStaticArraysExt.jl
@@ -0,0 +1,12 @@
+module UnrolledUtilitiesStaticArraysExt
+
+import UnrolledUtilities
+import StaticArrays: SVector, MVector
+
+@inline UnrolledUtilities.output_type_for_promotion(::SVector) = SVector
+@inline UnrolledUtilities.constructor_from_tuple(::Type{SVector}) = SVector
+
+@inline UnrolledUtilities.output_type_for_promotion(::MVector) = MVector
+@inline UnrolledUtilities.constructor_from_tuple(::Type{MVector}) = MVector
+
+end
diff --git a/logo-white.png b/logo-dark.png
similarity index 100%
rename from logo-white.png
rename to logo-dark.png
diff --git a/logo-dark.svg b/logo-dark.svg
new file mode 100644
index 0000000..c8c43ac
--- /dev/null
+++ b/logo-dark.svg
@@ -0,0 +1,131 @@
+
+
+
+
diff --git a/src/StaticBitVector.jl b/src/StaticBitVector.jl
new file mode 100644
index 0000000..68faea6
--- /dev/null
+++ b/src/StaticBitVector.jl
@@ -0,0 +1,150 @@
+"""
+ StaticBitVector{N, [U]}(f)
+ StaticBitVector{N, [U]}([bit])
+
+A statically sized analogue of `BitVector` with `Unsigned` chunks of type `U`,
+which can be constructed using either a function `f(n)` or a constant `bit`. By
+default, `U` is set to `UInt8` and `bit` is set to `false`.
+
+This iterator can only store `Bool`s, so its `output_type_for_promotion` is a
+`ConditionalOutputType`. Efficient methods are provided for `unrolled_map`,
+`unrolled_accumulate`, `unrolled_take`, and `unrolled_drop`, though the methods
+for `unrolled_map` and `unrolled_accumulate` only apply when their output's
+first item is a `Bool`. No other unrolled functions can use `StaticBitVector`s
+as output types.
+"""
+struct StaticBitVector{N, U <: Unsigned, I <: NTuple{<:Any, U}} <:
+ StaticSequence{N}
+ ints::I
+end
+@inline StaticBitVector{N, U}(ints) where {N, U} =
+ StaticBitVector{N, U, typeof(ints)}(ints)
+@inline StaticBitVector{N}(args...) where {N} =
+ StaticBitVector{N, UInt8}(args...)
+
+@inline function StaticBitVector{N, U}(bit::Bool = false) where {N, U}
+ n_bits_per_int = 8 * sizeof(U)
+ n_ints = cld(N, n_bits_per_int)
+ ints = ntuple(Returns(bit ? ~zero(U) : zero(U)), Val(n_ints))
+ return StaticBitVector{N, U}(ints)
+end
+
+@inline function StaticBitVector{N, U}(f::Function) where {N, U}
+ n_bits_per_int = 8 * sizeof(U)
+ n_ints = cld(N, n_bits_per_int)
+ ints = ntuple(Val(n_ints)) do int_index
+ @inline
+ first_index = n_bits_per_int * (int_index - 1) + 1
+ unrolled_reduce(
+ StaticOneTo(min(n_bits_per_int, N - first_index + 1));
+ init = zero(U),
+ ) do int, bit_index
+ @inline
+ bit_offset = bit_index - 1
+ int | U(f(first_index + bit_offset)::Bool) << bit_offset
+ end
+ end
+ return StaticBitVector{N, U}(ints)
+end
+
+@inline function int_index_and_bit_offset(::Type{U}, n) where {U}
+ int_offset, bit_offset = divrem(n - 1, 8 * sizeof(U))
+ return (int_offset + 1, bit_offset)
+end
+
+@inline function generic_getindex(
+ itr::StaticBitVector{<:Any, U},
+ n::Integer,
+) where {U}
+ int_index, bit_offset = int_index_and_bit_offset(U, n)
+ int = itr.ints[int_index]
+ return Bool(int >> bit_offset & one(int))
+end
+
+@inline function Base.setindex(
+ itr::StaticBitVector{N, U},
+ bit::Bool,
+ n::Integer,
+) where {N, U}
+ int_index, bit_offset = int_index_and_bit_offset(U, n)
+ int = itr.ints[int_index]
+ int′ = int & ~(one(int) << bit_offset) | U(bit) << bit_offset
+ ints = Base.setindex(itr.ints, int′, int_index)
+ return StaticBitVector{N, U}(ints)
+end
+
+@inline output_type_for_promotion(::StaticBitVector{<:Any, U}) where {U} =
+ ConditionalOutputType(Bool, StaticBitVector{<:Any, U})
+
+@inline unrolled_map_into(::Type{StaticBitVector{<:Any, U}}, f, itr) where {U} =
+ StaticBitVector{length(itr), U}(
+ Base.Fix1(generic_getindex, Iterators.map(f, itr)),
+ )
+
+@inline function unrolled_accumulate_into(
+ ::Type{StaticBitVector{<:Any, U}},
+ op,
+ itr,
+ init,
+ transform,
+) where {U}
+ N = length(itr)
+ n_bits_per_int = 8 * sizeof(U)
+ n_ints = cld(N, n_bits_per_int)
+ ints = unrolled_accumulate_into_tuple(
+ StaticOneTo(n_ints);
+ init = (nothing, init),
+ transform = first,
+ ) do (_, init_value_for_new_int), int_index
+ @inline
+ first_index = n_bits_per_int * (int_index - 1) + 1
+ unrolled_reduce(
+ StaticOneTo(min(n_bits_per_int, N - first_index + 1));
+ init = (zero(U), init_value_for_new_int),
+ ) do (int, prev_value), bit_index
+ @inline
+ bit_offset = bit_index - 1
+ item = generic_getindex(itr, first_index + bit_offset)
+ new_value =
+ first_index + bit_offset == 1 && prev_value isa NoInit ?
+ item : op(prev_value, item)
+ (int | U(transform(new_value)::Bool) << bit_offset, new_value)
+ end
+ end
+ return StaticBitVector{N, U}(ints)
+end
+
+# TODO: Add unrolled_push and unrolled_append
+
+@inline function unrolled_take(
+ itr::StaticBitVector{<:Any, U},
+ ::Val{N},
+) where {N, U}
+ n_bits_per_int = 8 * sizeof(U)
+ n_ints = cld(N, n_bits_per_int)
+ ints = unrolled_take(itr.ints, Val(n_ints))
+ return StaticBitVector{N, U}(ints)
+end
+
+@inline function unrolled_drop(
+ itr::StaticBitVector{N_old, U},
+ ::Val{N},
+) where {N_old, N, U}
+ n_bits_per_int = 8 * sizeof(U)
+ n_ints = cld(N_old - N, n_bits_per_int)
+ n_dropped_ints = length(itr.ints) - n_ints
+ bit_offset = N - n_bits_per_int * n_dropped_ints
+ ints_without_offset = unrolled_drop(itr.ints, Val(n_dropped_ints))
+ ints = if bit_offset == 0
+ ints_without_offset
+ else
+ cur_ints = ints_without_offset
+ next_ints = unrolled_push(unrolled_drop(cur_ints, Val(1)), nothing)
+ unrolled_map_into_tuple(cur_ints, next_ints) do cur_int, next_int
+ @inline
+ isnothing(next_int) ? cur_int >> bit_offset :
+ cur_int >> bit_offset | next_int << (n_bits_per_int - bit_offset)
+ end
+ end
+ return StaticBitVector{N_old - N, U}(ints)
+end
diff --git a/src/StaticOneTo.jl b/src/StaticOneTo.jl
new file mode 100644
index 0000000..3a80b0d
--- /dev/null
+++ b/src/StaticOneTo.jl
@@ -0,0 +1,18 @@
"""
    StaticOneTo(N)

A lazy and statically sized analogue of `Base.OneTo(N)`.

This iterator can only store the integers from 1 to `N`, so its
`output_type_for_promotion` is `NoOutputType()`. An efficient method is provided
for `unrolled_take`, but no other unrolled functions can use `StaticOneTo`s as
output types.
"""
struct StaticOneTo{N} <: StaticSequence{N} end
@inline StaticOneTo(N) = StaticOneTo{N}()

# The n-th element of 1:N is just n, so no data needs to be stored.
@inline generic_getindex(::StaticOneTo, n) = n

# A StaticOneTo cannot store arbitrary items, so it is never an output type.
@inline output_type_for_promotion(::StaticOneTo) = NoOutputType()

# Taking the first N elements of a StaticOneTo yields another StaticOneTo.
@inline unrolled_take(::StaticOneTo, ::Val{N}) where {N} = StaticOneTo(N)
diff --git a/src/UnrolledUtilities.jl b/src/UnrolledUtilities.jl
index dc69559..52f1674 100644
--- a/src/UnrolledUtilities.jl
+++ b/src/UnrolledUtilities.jl
@@ -4,10 +4,14 @@ export unrolled_any,
unrolled_all,
unrolled_foreach,
unrolled_map,
+ unrolled_applyat,
unrolled_reduce,
unrolled_mapreduce,
- unrolled_zip,
- unrolled_enumerate,
+ unrolled_accumulate,
+ unrolled_push,
+ unrolled_append,
+ unrolled_take,
+ unrolled_drop,
unrolled_in,
unrolled_unique,
unrolled_filter,
@@ -15,114 +19,144 @@ export unrolled_any,
unrolled_flatten,
unrolled_flatmap,
unrolled_product,
- unrolled_applyat,
- unrolled_take,
- unrolled_drop
-
-inferred_length(::Type{<:NTuple{N, Any}}) where {N} = N
-# We could also add support for statically-sized iterators that are not Tuples.
-
-f_exprs(itr_type) = (:(f(itr[$n])) for n in 1:inferred_length(itr_type))
-@inline @generated unrolled_any(f, itr) = Expr(:||, f_exprs(itr)...)
-@inline @generated unrolled_all(f, itr) = Expr(:&&, f_exprs(itr)...)
-
-function zipped_f_exprs(itr_types)
- L = length(itr_types)
- L == 0 && error("unrolled functions need at least one iterator as input")
- N = minimum(inferred_length, itr_types)
- return (:(f($((:(itrs[$l][$n]) for l in 1:L)...))) for n in 1:N)
-end
-@inline @generated unrolled_foreach(f, itrs...) =
- Expr(:block, zipped_f_exprs(itrs)..., nothing)
-@inline @generated unrolled_map(f, itrs...) =
- Expr(:tuple, zipped_f_exprs(itrs)...)
-
-function nested_op_expr(itr_type)
- N = inferred_length(itr_type)
- N == 0 && error("unrolled_reduce needs an `init` value for empty iterators")
- item_exprs = (:(itr[$n]) for n in 1:N)
- return reduce((expr1, expr2) -> :(op($expr1, $expr2)), item_exprs)
-end
-@inline @generated unrolled_reduce_without_init(op, itr) = nested_op_expr(itr)
-
-struct NoInit end
+ StaticOneTo,
+ StaticBitVector
+
+include("unrollable_iterator_interface.jl")
+include("recursively_unrolled_functions.jl")
+include("generatively_unrolled_functions.jl")
+
+struct NoInit end # Analogue of Base._InitialValue for reduction/accumulation.
+
# Each user-facing function dispatches to either a recursively unrolled or a
# generatively unrolled implementation, depending on rec_unroll(itr).
@inline unrolled_any(f, itr) =
    (rec_unroll(itr) ? rec_unrolled_any : gen_unrolled_any)(f, itr)
@inline unrolled_any(itr) = unrolled_any(identity, itr)

@inline unrolled_all(f, itr) =
    (rec_unroll(itr) ? rec_unrolled_all : gen_unrolled_all)(f, itr)
@inline unrolled_all(itr) = unrolled_all(identity, itr)

@inline unrolled_foreach(f, itr) =
    (rec_unroll(itr) ? rec_unrolled_foreach : gen_unrolled_foreach)(f, itr)
# Multiple-iterator version: zip the iterators and splat each tuple of items.
@inline unrolled_foreach(f, itrs...) = unrolled_foreach(splat(f), zip(itrs...))

# unrolled_map_into_tuple always generates a Tuple, while unrolled_map converts
# the result to the output type inferred from the equivalent lazy map.
@inline unrolled_map_into_tuple(f, itr) =
    (rec_unroll(itr) ? rec_unrolled_map : gen_unrolled_map)(f, itr)
@inline unrolled_map_into(output_type, f, itr) =
    constructor_from_tuple(output_type)(unrolled_map_into_tuple(f, itr))
@inline unrolled_map(f, itr) =
    unrolled_map_into(inferred_output_type(Iterators.map(f, itr)), f, itr)
@inline unrolled_map(f, itrs...) = unrolled_map(splat(f), zip(itrs...))

# Apply f to the n-th item of itr without applying it to any other items.
@inline unrolled_applyat(f, n, itr) =
    (rec_unroll(itr) ? rec_unrolled_applyat : gen_unrolled_applyat)(f, n, itr)
@inline unrolled_applyat(f, n, itrs...) =
    unrolled_applyat(splat(f), n, zip(itrs...))
@inline unrolled_applyat_bounds_error() =
    error("unrolled_applyat has detected an out-of-bounds index")
+
+@inline unrolled_reduce(op, itr, init) =
+ (rec_unroll(itr) ? rec_unrolled_reduce : gen_unrolled_reduce)(op, itr, init)
@inline unrolled_reduce(op, itr; init = NoInit()) =
- unrolled_reduce_without_init(op, init isa NoInit ? itr : (init, itr...))
+ isempty(itr) && init isa NoInit ?
+ error("unrolled_reduce requires an init value for empty iterators") :
+ unrolled_reduce(op, itr, init)
@inline unrolled_mapreduce(f, op, itrs...; init = NoInit()) =
- unrolled_reduce(op, unrolled_map(f, itrs...); init)
+ unrolled_reduce(op, Iterators.map(f, itrs...), init)
+
# Like the unrolled_map definitions above, but for accumulation: each item of
# the result is transform applied to a partial reduction value of itr.
@inline unrolled_accumulate_into_tuple(op, itr, init, transform) =
    (rec_unroll(itr) ? rec_unrolled_accumulate : gen_unrolled_accumulate)(
        op,
        itr,
        init,
        transform,
    )
@inline unrolled_accumulate_into(output_type, op, itr, init, transform) =
    constructor_from_tuple(output_type)(
        unrolled_accumulate_into_tuple(op, itr, init, transform),
    )
@inline unrolled_accumulate(op, itr; init = NoInit(), transform = identity) =
    unrolled_accumulate_into(
        accumulate_output_type(op, itr, init, transform),
        op,
        itr,
        init,
        transform,
    )
-@inline unrolled_zip(itrs...) = unrolled_map(tuple, itrs...)
# Append a single item to itr, using the output type inferred from itr.
@inline unrolled_push(itr, item) =
    inferred_constructor_from_tuple(itr)((itr..., item))
-@inline unrolled_enumerate(itrs...) =
- unrolled_zip(ntuple(identity, Val(length(itrs[1]))), itrs...)
# Concatenate two iterators, using the output type promoted from both of them.
@inline unrolled_append_into(output_type, itr1, itr2) =
    constructor_from_tuple(output_type)((itr1..., itr2...))
@inline unrolled_append(itr1, itr2) =
    unrolled_append_into(promoted_output_type((itr1, itr2)), itr1, itr2)

# Keep only the first N items of itr. When its second argument is a Val, ntuple
# generates an unrolled expression.
@inline unrolled_take(itr, ::Val{N}) where {N} =
    inferred_constructor_from_tuple(itr)(
        ntuple(Base.Fix1(generic_getindex, itr), Val(N)),
    )

# Drop the first N items of itr, keeping the remaining length(itr) - N items.
@inline unrolled_drop(itr, ::Val{N}) where {N} =
    inferred_constructor_from_tuple(itr)(
        ntuple(n -> generic_getindex(itr, N + n), Val(length(itr) - N)),
    )
@inline unrolled_in(item, itr) = unrolled_any(Base.Fix1(===, item), itr)
# Using === instead of == or isequal improves type stability for singletons.
@inline unrolled_unique(itr) =
- unrolled_reduce(itr; init = ()) do unique_items, item
+ unrolled_reduce(itr; init = inferred_empty(itr)) do unique_items, item
@inline
- unrolled_in(item, unique_items) ? unique_items : (unique_items..., item)
+ unrolled_in(item, unique_items) ? unique_items :
+ unrolled_push(unique_items, item)
end
@inline unrolled_filter(f, itr) =
- unrolled_reduce(itr; init = ()) do filtered_items, item
+ unrolled_reduce(itr; init = inferred_empty(itr)) do items_with_true_f, item
@inline
- f(item) ? (filtered_items..., item) : filtered_items
+ f(item) ? unrolled_push(items_with_true_f, item) : items_with_true_f
end
@inline unrolled_split(f, itr) =
- unrolled_reduce(itr; init = ((), ())) do (f_items, not_f_items), item
+ unrolled_reduce(
+ itr;
+ init = (inferred_empty(itr), inferred_empty(itr)),
+ ) do (items_with_true_f, items_with_false_f), item
@inline
- f(item) ? ((f_items..., item), not_f_items) :
- (f_items, (not_f_items..., item))
+ f(item) ? (unrolled_push(items_with_true_f, item), items_with_false_f) :
+ (items_with_true_f, unrolled_push(items_with_false_f, item))
end
@inline unrolled_flatten(itr) =
- unrolled_reduce((item1, item2) -> (item1..., item2...), itr; init = ())
+ unrolled_reduce(unrolled_append, itr; init = promoted_empty(itr))
@inline unrolled_flatmap(f, itrs...) =
- unrolled_flatten(unrolled_map(f, itrs...))
+ unrolled_flatten(Iterators.map(f, itrs...))
@inline unrolled_product(itrs...) =
- unrolled_reduce(itrs; init = ((),)) do product_itr, itr
+ unrolled_reduce(itrs; init = (promoted_empty(itrs),)) do product_itr, itr
@inline
unrolled_flatmap(itr) do item
@inline
- unrolled_map(product_tuple -> (product_tuple..., item), product_itr)
+ unrolled_map_into_tuple(Base.Fix2(unrolled_push, item), product_itr)
end
end
-@inline unrolled_applyat(f, n, itrs...) = unrolled_foreach(
- (i, items...) -> i == n && f(items...),
- unrolled_enumerate(itrs...),
-)
+abstract type StaticSequence{N} end
-@inline unrolled_take(itr, ::Val{N}) where {N} = ntuple(i -> itr[i], Val(N))
-@inline unrolled_drop(itr, ::Val{N}) where {N} =
- ntuple(i -> itr[N + i], Val(length(itr) - N))
-# When its second argument is a Val, ntuple is unrolled via Base.@ntuple.
-
-@static if hasfield(Method, :recursion_relation)
- # Remove recursion limits for functions whose arguments are also functions.
- for func in (
- unrolled_any,
- unrolled_all,
- unrolled_foreach,
- unrolled_map,
- unrolled_reduce_without_init,
- unrolled_reduce,
- unrolled_mapreduce,
- unrolled_filter,
- unrolled_split,
- unrolled_flatmap,
- unrolled_applyat,
- )
- for method in methods(func)
- method.recursion_relation = (_...) -> true
- end
- end
-end
# Implement enough of Base's indexing and iteration interfaces for every
# StaticSequence to support length, begin/end indexing, iteration, and
# splatting; element access goes through generic_getindex.
@inline Base.length(::StaticSequence{N}) where {N} = N
@inline Base.firstindex(::StaticSequence) = 1
@inline Base.lastindex(itr::StaticSequence) = length(itr)
@inline Base.getindex(itr::StaticSequence, n::Integer) =
    generic_getindex(itr, n)
@inline Base.iterate(itr::StaticSequence, n = 1) =
    n > length(itr) ? nothing : (generic_getindex(itr, n), n + 1)
+
+include("StaticOneTo.jl")
+include("StaticBitVector.jl")
+
+include("recursion_limits.jl") # This must be included at the end of the module.
end
diff --git a/src/generatively_unrolled_functions.jl b/src/generatively_unrolled_functions.jl
new file mode 100644
index 0000000..ce6bc42
--- /dev/null
+++ b/src/generatively_unrolled_functions.jl
@@ -0,0 +1,80 @@
# Expressions of the form f(generic_getindex(itr, n)) for every n in 1:N; the
# @generated functions below splice them into unrolled control-flow structures.
f_exprs(N) = (:(f(generic_getindex(itr, $n))) for n in 1:N)

# Unrolled any: f(itr[1]) || f(itr[2]) || ... || f(itr[N]).
@inline @generated _gen_unrolled_any(::Val{N}, f, itr) where {N} =
    Expr(:||, f_exprs(N)...)
@inline gen_unrolled_any(f, itr) = _gen_unrolled_any(Val(length(itr)), f, itr)

# Unrolled all: f(itr[1]) && f(itr[2]) && ... && f(itr[N]).
@inline @generated _gen_unrolled_all(::Val{N}, f, itr) where {N} =
    Expr(:&&, f_exprs(N)...)
@inline gen_unrolled_all(f, itr) = _gen_unrolled_all(Val(length(itr)), f, itr)

# Unrolled foreach: a block of calls to f that evaluates to nothing.
@inline @generated _gen_unrolled_foreach(::Val{N}, f, itr) where {N} =
    Expr(:block, f_exprs(N)..., nothing)
@inline gen_unrolled_foreach(f, itr) =
    _gen_unrolled_foreach(Val(length(itr)), f, itr)

# Unrolled map: a Tuple of calls to f.
@inline @generated _gen_unrolled_map(::Val{N}, f, itr) where {N} =
    Expr(:tuple, f_exprs(N)...)
@inline gen_unrolled_map(f, itr) = _gen_unrolled_map(Val(length(itr)), f, itr)

# Unrolled applyat: one early-return branch per index, followed by an error for
# out-of-bounds values of n.
@inline @generated _gen_unrolled_applyat(::Val{N}, f, n, itr) where {N} = Expr(
    :block,
    map(((n, expr),) -> :(n == $n && return $expr), enumerate(f_exprs(N)))...,
    :(unrolled_applyat_bounds_error()),
) # This block gets optimized into a switch instruction during LLVM codegen.
@inline gen_unrolled_applyat(f, n, itr) =
    _gen_unrolled_applyat(Val(length(itr)), f, n, itr)
+
# A nested expression of calls to op, e.g., op(op(op(init, itr[1]), itr[2]), …).
# When init is a NoInit, itr[1] is used to start the reduction instead.
function nested_op_expr(N, init_type)
    init_expr = init_type <: NoInit ? :(generic_getindex(itr, 1)) : :init
    n_range = init_type <: NoInit ? (2:N) : (1:N)
    return foldl(n_range; init = init_expr) do prev_op_expr, n
        :(op($prev_op_expr, generic_getindex(itr, $n)))
    end # Use foldl instead of reduce to guarantee left associativity.
end

@inline @generated _gen_unrolled_reduce(::Val{N}, op, itr, init) where {N} =
    nested_op_expr(N, init)
@inline gen_unrolled_reduce(op, itr, init) =
    _gen_unrolled_reduce(Val(length(itr)), op, itr, init)

# One expression per item of the accumulation result. Each expression assigns
# the current partial reduction value to a fresh variable and applies transform
# to it; that variable is then reused by the next expression's call to op. The
# op expression generated on the final iteration reads past the end of itr, but
# it is discarded by the map over first below, so it is never evaluated.
function transformed_sequential_op_exprs(N, init_type)
    first_item_expr = :(generic_getindex(itr, 1))
    init_expr =
        init_type <: NoInit ? first_item_expr : :(op(init, $first_item_expr))
    transformed_exprs_and_next_op_exprs =
        accumulate(1:N; init = (nothing, init_expr)) do (_, prev_op_expr), n
            var = gensym()
            next_op_expr = :(op($var, generic_getindex(itr, $(n + 1))))
            (:($var = $prev_op_expr; transform($var)), next_op_expr)
        end
    return map(first, transformed_exprs_and_next_op_exprs)
end

@inline @generated _gen_unrolled_accumulate(
    ::Val{N},
    op,
    itr,
    init,
    transform,
) where {N} = Expr(:tuple, transformed_sequential_op_exprs(N, init)...)
@inline gen_unrolled_accumulate(op, itr, init, transform) =
    _gen_unrolled_accumulate(Val(length(itr)), op, itr, init, transform)
+
################################################################################

# NOTE: The following is experimental and will likely be removed in the future.

# Reduce over the integers from 1 to N, splicing them into the generated
# expression as literal values instead of reading them from an iterator.
@inline @generated function val_unrolled_reduce(op, ::Val{N}, init) where {N}
    item_exprs = init <: NoInit ? (1:N) : (:init, 1:N...)
    return foldl(item_exprs) do prev_op_expr, item_expr
        :(op($prev_op_expr, $item_expr))
    end # Use foldl instead of reduce to guarantee left associativity.
end
@inline unrolled_reduce(op, ::Val{N}, init) where {N} =
    val_unrolled_reduce(op, Val(N), init)

# TODO: Determine why unrolled_reduce(op, Val(N), init) compiles faster than
# unrolled_reduce(op, StaticOneTo(N), init) for the non-orographic gravity wave
# parametrization test in ClimaAtmos, to the point where the StaticOneTo version
# seems to hang while the Val version compiles in under a second.
diff --git a/src/recursion_limits.jl b/src/recursion_limits.jl
new file mode 100644
index 0000000..9f9c279
--- /dev/null
+++ b/src/recursion_limits.jl
@@ -0,0 +1,56 @@
# Remove recursion limits from functions that call themselves, and also from all
# functions whose arguments can be arbitrary functions (including themselves).
@static if hasfield(Method, :recursion_relation)
    for func in (
        generic_getindex,
        output_type_for_promotion,
        _rec_unrolled_any,
        _rec_unrolled_all,
        _rec_unrolled_foreach,
        _rec_unrolled_map,
        _rec_unrolled_applyat,
        _rec_unrolled_reduce,
        _rec_unrolled_accumulate,
        rec_unrolled_any,
        rec_unrolled_all,
        rec_unrolled_foreach,
        rec_unrolled_map,
        rec_unrolled_applyat,
        rec_unrolled_reduce,
        rec_unrolled_accumulate,
        _gen_unrolled_any,
        _gen_unrolled_all,
        _gen_unrolled_foreach,
        _gen_unrolled_map,
        _gen_unrolled_applyat,
        _gen_unrolled_reduce,
        _gen_unrolled_accumulate,
        gen_unrolled_any,
        gen_unrolled_all,
        gen_unrolled_foreach,
        gen_unrolled_map,
        gen_unrolled_applyat,
        gen_unrolled_reduce,
        gen_unrolled_accumulate,
        val_unrolled_reduce,
        unrolled_any,
        unrolled_all,
        unrolled_foreach,
        unrolled_map_into_tuple,
        unrolled_map_into,
        unrolled_map,
        unrolled_applyat,
        unrolled_reduce,
        unrolled_mapreduce,
        unrolled_accumulate_into_tuple,
        unrolled_accumulate_into,
        unrolled_accumulate,
        unrolled_filter,
        unrolled_split,
        unrolled_flatmap,
    )
        # Setting recursion_relation to Returns(true) marks every recursive
        # call through these methods as allowed during type inference.
        for method in methods(func)
            method.recursion_relation = Returns(true)
        end
    end
end
diff --git a/src/recursively_unrolled_functions.jl b/src/recursively_unrolled_functions.jl
new file mode 100644
index 0000000..db88bef
--- /dev/null
+++ b/src/recursively_unrolled_functions.jl
@@ -0,0 +1,47 @@
# Recursive version of unrolled_any, implemented by splatting itr into varargs.
@inline _rec_unrolled_any(f) = false # No item of an empty list satisfies f.
@inline _rec_unrolled_any(f, first_item, remaining_items...) =
    f(first_item) ? true : _rec_unrolled_any(f, remaining_items...)
@inline rec_unrolled_any(f, itr) = _rec_unrolled_any(f, itr...)
+
# Recursive version of unrolled_all, implemented by splatting itr into varargs.
@inline _rec_unrolled_all(f) = true # Every item of an empty list satisfies f.
@inline _rec_unrolled_all(f, first_item, remaining_items...) =
    f(first_item) ? _rec_unrolled_all(f, remaining_items...) : false
@inline rec_unrolled_all(f, itr) = _rec_unrolled_all(f, itr...)
+
# Recursive version of unrolled_foreach; calls f once on each item, discarding
# the results, and returns nothing.
@inline _rec_unrolled_foreach(f) = nothing
@inline function _rec_unrolled_foreach(f, first_item, remaining_items...)
    f(first_item)
    return _rec_unrolled_foreach(f, remaining_items...)
end
@inline rec_unrolled_foreach(f, itr) = _rec_unrolled_foreach(f, itr...)
+
# Recursive version of unrolled_map; always generates a Tuple, regardless of
# the type of itr.
@inline _rec_unrolled_map(f) = ()
@inline _rec_unrolled_map(f, first_item, remaining_items...) =
    (f(first_item), _rec_unrolled_map(f, remaining_items...)...)
@inline rec_unrolled_map(f, itr) = _rec_unrolled_map(f, itr...)
+
# Recursive version of unrolled_applyat; the index is decremented as items are
# consumed until it reaches 1, at which point f is applied to the matching
# item. Running out of items before that triggers a bounds error.
@inline _rec_unrolled_applyat(f, offset_n) = unrolled_applyat_bounds_error()
@inline _rec_unrolled_applyat(f, offset_n, first_item, remaining_items...) =
    offset_n == 1 ? f(first_item) :
    _rec_unrolled_applyat(f, offset_n - 1, remaining_items...)
@inline rec_unrolled_applyat(f, n, itr) = _rec_unrolled_applyat(f, n, itr...)
+
# Recursive version of unrolled_reduce; the accumulated value is threaded
# through the leading argument, and the first item of itr seeds the reduction
# when init is a NoInit.
@inline _rec_unrolled_reduce(op, prev_value) = prev_value
@inline _rec_unrolled_reduce(op, prev_value, item, items...) =
    _rec_unrolled_reduce(op, op(prev_value, item), items...)
@inline rec_unrolled_reduce(op, itr, init) =
    init isa NoInit ? _rec_unrolled_reduce(op, itr...) :
    _rec_unrolled_reduce(op, init, itr...)

# Recursive version of unrolled_accumulate; each output item is transform
# applied to a partial reduction value. When init is not a NoInit, it is folded
# into the first item before the recursion starts, so that the recursion itself
# never needs to handle an init value.
@inline _rec_unrolled_accumulate(op, transform, prev_value) =
    (transform(prev_value),)
@inline _rec_unrolled_accumulate(op, transform, prev_value, item, items...) = (
    transform(prev_value),
    _rec_unrolled_accumulate(op, transform, op(prev_value, item), items...)...,
)
@inline rec_unrolled_accumulate(op, itr, init, transform) =
    isempty(itr) ? () :
    init isa NoInit ? _rec_unrolled_accumulate(op, transform, itr...) :
    _rec_unrolled_accumulate(
        op,
        transform,
        op(init, generic_getindex(itr, 1)),
        unrolled_drop(itr, Val(1))...,
    )
diff --git a/src/unrollable_iterator_interface.jl b/src/unrollable_iterator_interface.jl
new file mode 100644
index 0000000..f667ff8
--- /dev/null
+++ b/src/unrollable_iterator_interface.jl
@@ -0,0 +1,203 @@
"""
    rec_unroll(itr)

Whether to use recursive loop unrolling instead of generative loop unrolling for
the iterator `itr`.

In general, recursive loop unrolling is faster to compile for small iterators,
but it becomes extremely slow to compile for large iterators, and it usually
generates suboptimal LLVM code for large iterators. On the other hand,
generative loop unrolling is slow to compile for small iterators, but its
compilation time does not grow as rapidly with respect to iterator size, and it
always generates optimal LLVM code. The default is currently to use recursive
unrolling for iterator lengths up to 16, and to use generative unrolling for
larger iterators.
"""
@inline rec_unroll(itr) = length(itr) <= 16

"""
    generic_getindex(itr, n)

An alternative to `Base.getindex` that can also handle internal types such as
`Base.Generator` and `Base.Iterators.Enumerate`. Defaults to `Base.getindex`.
"""
@inline generic_getindex(itr, n) = getindex(itr, n)
# A Generator's n-th item is its function applied to its iterator's n-th item.
@inline generic_getindex(itr::Base.Generator, n) =
    itr.f(generic_getindex(itr.iter, n))
# An Enumerate's n-th item is the index n paired with the n-th wrapped item.
@inline generic_getindex(itr::Base.Iterators.Enumerate, n) =
    (n, generic_getindex(itr.itr, n))
# A Zip's n-th item is a Tuple of the n-th items of its wrapped iterators.
@inline generic_getindex(itr::Base.Iterators.Zip, n) =
    unrolled_map_into_tuple(Base.Fix2(generic_getindex, n), itr.is)

# Inferred types of the first and second items of itr; these are used below to
# determine output types for maps, reductions, and accumulations.
@inline first_item_type(itr) =
    Base.promote_op(Base.Fix2(generic_getindex, 1), typeof(itr))
@inline second_item_type(itr) =
    Base.promote_op(Base.Fix2(generic_getindex, 2), typeof(itr))
+
"""
    output_type_for_promotion(itr)

The type of output that unrolled functions should try to generate for the input
iterator `itr`, or a `ConditionalOutputType` if the output type depends on the
type of items that need to be stored in it, or `NoOutputType()` if `itr` is a
lazy iterator without any associated output type. Defaults to `Tuple`.
"""
@inline output_type_for_promotion(_) = Tuple
@inline output_type_for_promotion(::NamedTuple{names}) where {names} =
    NamedTuple{names}
# Lazy wrappers take their output types from the iterators they wrap.
@inline output_type_for_promotion(itr::Base.Generator) =
    output_type_for_promotion(itr.iter)
@inline output_type_for_promotion(itr::Base.Iterators.Enumerate) =
    output_type_for_promotion(itr.itr)
@inline output_type_for_promotion(itr::Base.Iterators.Zip) =
    maybe_ambiguous_promoted_output_type(itr.is)
+
+"""
+ AmbiguousOutputType
+
+The result of `output_type_for_promotion` for iterators that do not have
+well-defined output types.
+"""
+abstract type AmbiguousOutputType end
+
+"""
+ NoOutputType()
+
+The `AmbiguousOutputType` of lazy iterators.
+"""
+struct NoOutputType <: AmbiguousOutputType end
+
+"""
+ ConditionalOutputType(allowed_item_type, output_type, [fallback_type])
+
+An `AmbiguousOutputType` that can have one of two possible values. If the first
+item in the output is a subtype of `allowed_item_type`, the output will have the
+type `output_type`; otherwise, it will have the type `fallback_type`, which is
+set to `Tuple` by default.
+"""
+struct ConditionalOutputType{I, O, O′} <: AmbiguousOutputType end
+@inline ConditionalOutputType(
+ allowed_item_type::Type,
+ output_type::Type,
+ fallback_type::Type = Tuple,
+) = ConditionalOutputType{allowed_item_type, output_type, fallback_type}()
+
# Resolve an output type or AmbiguousOutputType into a Type, given a zero-
# argument function that computes the type of the first item in the output.
# That function is only called for ConditionalOutputTypes, which are the only
# case in which the result depends on the first item's type.
@inline unambiguous_output_type(_, ::Type{O}) where {O} = O
@inline unambiguous_output_type(_, ::NoOutputType) = Tuple
@inline unambiguous_output_type(
    get_first_item_type,
    ::ConditionalOutputType{I, O, O′},
) where {I, O, O′} = get_first_item_type() <: I ? O : O′

"""
    output_promote_rule(output_type1, output_type2)

The type of output that should be generated when two iterators do not have the
same `output_type_for_promotion`, or `Union{}` if these iterators should not be
used together. Only one method of `output_promote_rule` needs to be defined for
any pair of output types.

By default, all types take precedence over `NoOutputType()`, and the conditional
part of any `ConditionalOutputType` takes precedence over an unconditional type
(so that only the `fallback_type` of any conditional type gets promoted). The
default result for all other pairs of unequal output types is `Union{}`.
"""
@inline output_promote_rule(_, _) = Union{}
@inline output_promote_rule(::Type{O}, ::Type{O}) where {O} = O
@inline output_promote_rule(::NoOutputType, output_type) = output_type
# Promoting a conditional type with any other type only promotes its fallback.
@inline output_promote_rule(
    ::ConditionalOutputType{I, O, O′},
    ::Type{O′′},
) where {I, O, O′, O′′} =
    ConditionalOutputType(I, O, output_promote_rule(O′, O′′))
@inline output_promote_rule(
    ::Type{O′},
    ::ConditionalOutputType{I, O, O′′},
) where {I, O, O′, O′′} =
    ConditionalOutputType(I, O, output_promote_rule(O′, O′′))
@inline output_promote_rule(
    ::ConditionalOutputType{I, O, O′},
    ::ConditionalOutputType{I, O, O′′},
) where {I, O, O′, O′′} =
    ConditionalOutputType(I, O, output_promote_rule(O′, O′′))

# Apply output_promote_rule in both argument orders, ensuring that the results
# are consistent with each other and that at least one of them is defined.
@inline function output_promote_result(O1, O2)
    O12 = output_promote_rule(O1, O2)
    O21 = output_promote_rule(O2, O1)
    O12 == O21 == Union{} &&
        error("output_promote_rule is undefined for $O1 and $O2")
    (O12 == O21 || O21 == Union{}) && return O12
    O12 == Union{} && return O21
    error("output_promote_rule yields inconsistent results for $O1 and $O2: \
           $O12 for $O1 followed by $O2, versus $O21 for $O2 followed by $O1")
end
+
# The (possibly ambiguous) output type promoted across all the given iterators.
@inline maybe_ambiguous_promoted_output_type(itrs) =
    isempty(itrs) ? Tuple : # Generate a Tuple when given 0 inputs.
    unrolled_mapreduce(output_type_for_promotion, output_promote_result, itrs)

# The unambiguous output type of itr, with special cases for lazy wrappers so
# that the type of their first item can be computed with Base.promote_op
# instead of by materializing the item.
@inline inferred_output_type(itr) =
    unambiguous_output_type(output_type_for_promotion(itr)) do
        @inline
        first_item_type(itr)
    end
@inline inferred_output_type(itr::Base.Generator) =
    unambiguous_output_type(output_type_for_promotion(itr.iter)) do
        @inline
        Base.promote_op(itr.f, first_item_type(itr.iter))
    end
@inline inferred_output_type(itr::Base.Iterators.Enumerate) =
    unambiguous_output_type(output_type_for_promotion(itr.itr)) do
        @inline
        Tuple{Int, first_item_type(itr.itr)}
    end
@inline inferred_output_type(itr::Base.Iterators.Zip) =
    unambiguous_output_type(maybe_ambiguous_promoted_output_type(itr.is)) do
        @inline
        Tuple{unrolled_map_into_tuple(first_item_type, itr.is)...}
    end

# The unambiguous output type promoted across all the given iterators, with any
# conditional type resolved using the first item of the first iterator.
@inline promoted_output_type(itrs) =
    unambiguous_output_type(maybe_ambiguous_promoted_output_type(itrs)) do
        @inline
        first_item_type(generic_getindex(itrs, 1))
    end

# The output type of unrolled_accumulate: the inferred type of transform
# applied to the first accumulated value, whose arguments to op depend on
# whether an init value is provided.
@inline accumulate_output_type(op, itr, init, transform) =
    unambiguous_output_type(output_type_for_promotion(itr)) do
        @inline
        no_init = init isa NoInit
        arg1_type = no_init ? first_item_type(itr) : typeof(init)
        arg2_type = no_init ? second_item_type(itr) : first_item_type(itr)
        Base.promote_op(transform, Base.promote_op(op, arg1_type, arg2_type))
    end
+
"""
    constructor_from_tuple(output_type)

A function that can be used to efficiently construct an output of type
`output_type` from a `Tuple`, or `identity` if such an output should not be
constructed from a `Tuple`. Defaults to `identity`, which also handles the case
where `output_type` is already `Tuple`. The `output_type` here is guaranteed to
be a `Type`, rather than a `ConditionalOutputType` or `NoOutputType`.

Many statically sized iterators (e.g., `SVector`s) are essentially wrappers for
`Tuple`s, and their constructors for `Tuple`s can be reduced to no-ops. The main
exceptions are [`StaticOneTo`](@ref UnrolledUtilities.StaticOneTo)s and
[`StaticBitVector`](@ref UnrolledUtilities.StaticBitVector)s, which do not
provide constructors for `Tuple`s because there is no performance benefit to
making a lazy or low-storage data structure once a corresponding high-storage
data structure has already been constructed.
"""
@inline constructor_from_tuple(::Type) = identity
@inline constructor_from_tuple(::Type{NT}) where {NT <: NamedTuple} = NT

# Constructors for the inferred output type of one iterator and for the
# promoted output type of several iterators.
@inline inferred_constructor_from_tuple(itr) =
    constructor_from_tuple(inferred_output_type(itr))

@inline promoted_constructor_from_tuple(itrs) =
    constructor_from_tuple(promoted_output_type(itrs))

# Empty outputs with the output types of the given iterator(s).
@inline inferred_empty(itr) = inferred_constructor_from_tuple(itr)(())

@inline promoted_empty(itrs) = promoted_constructor_from_tuple(itrs)(())
diff --git a/test/aqua.jl b/test/aqua.jl
index d7becf1..ff1edd1 100644
--- a/test/aqua.jl
+++ b/test/aqua.jl
@@ -1,3 +1,4 @@
+using Test
import Aqua, UnrolledUtilities
# This is separate from all the other tests because Aqua.test_all checks for
diff --git a/test/runtests.jl b/test/runtests.jl
index 631181c..0cfddab 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -2,7 +2,9 @@ using SafeTestsets
@safetestset "Test and Analyze" begin
@time include("test_and_analyze.jl")
- print_comparison_table()
+ for (title, comparison_table_dict) in comparison_table_dicts
+ print_comparison_table(title, comparison_table_dict)
+ end
end
@safetestset "Aqua" begin
@time include("aqua.jl")
diff --git a/test/test_and_analyze.jl b/test/test_and_analyze.jl
index 70415e3..cd2a7bc 100644
--- a/test/test_and_analyze.jl
+++ b/test/test_and_analyze.jl
@@ -6,45 +6,78 @@ using InteractiveUtils
using UnrolledUtilities
-comparison_table_dict = OrderedDict()
+comparison_table_dicts = OrderedDict()
-function print_comparison_table(io = stdout, generate_html = false)
+function print_comparison_table(title, comparison_table_dict, io = stdout)
table_data =
mapreduce(vcat, collect(comparison_table_dict)) do (key, entries)
stack(entry -> (key..., entry...), entries; dims = 1)
end
- highlighter(f, color) =
- generate_html ? HtmlHighlighter(f, HtmlDecoration(; color)) :
- Highlighter(f, Crayon(; foreground = Symbol(color)))
-
- better_performance_but_harder_to_compile =
- highlighter(generate_html ? "royalblue" : "blue") do data, i, j
- data[i, 4] != data[i, 5] &&
- (endswith(data[i, 6], "slower") || endswith(data[i, 7], "more"))
- end
- better_performance =
- highlighter(generate_html ? "mediumseagreen" : "green") do data, i, j
- data[i, 4] != data[i, 5]
- end
- mixed_compilation =
- highlighter(generate_html ? "mediumorchid" : "magenta") do data, i, j
- (endswith(data[i, 6], "slower") && endswith(data[i, 7], "less")) ||
- (endswith(data[i, 6], "faster") && endswith(data[i, 7], "more"))
- end
- harder_to_compile =
- highlighter(generate_html ? "indianred" : "red") do data, i, j
- endswith(data[i, 6], "slower") || endswith(data[i, 7], "more")
- end
- easier_to_compile =
- highlighter(generate_html ? "darkturquoise" : "cyan") do data, i, j
- endswith(data[i, 6], "faster") || endswith(data[i, 7], "less")
+ writing_to_docs = io isa IOStream
+
+ color(color_str) =
+ writing_to_docs ? HtmlDecoration(; color = color_str) :
+ Crayon(; foreground = Symbol(color_str))
+ highlighter_color(optimization, run_time, compile_time, allocs) =
+ if (
+ contains(optimization, "better") ||
+ contains(optimization, "fewer allocs")
+ ) && !contains(run_time, "more") ||
+ contains(optimization, "identical") && contains(run_time, "less")
+ # better performance
+ if !contains(compile_time, "more") && !contains(allocs, "more")
+ # similar or better compilation and total allocations
+ if contains(optimization, "better")
+ # better optimization
+ color(writing_to_docs ? "darkturquoise" : "cyan")
+ else
+ # faster run time or fewer allocations at run time
+ color(writing_to_docs ? "mediumseagreen" : "green")
+ end
+ else
+ # worse compilation or total allocations
+ if contains(optimization, "better")
+ # better optimization
+ color(writing_to_docs ? "royalblue" : "blue")
+ else
+ # faster run time or fewer allocations at run time
+ color(writing_to_docs ? "khaki" : "yellow")
+ end
+ end
+ elseif contains(optimization, "identical") &&
+ contains(run_time, "similar")
+ # similar performance
+ if contains(compile_time, "less") && !contains(allocs, "more") ||
+ !contains(compile_time, "more") && contains(allocs, "less")
+ # better compilation or total allocations
+ color(writing_to_docs ? "mediumorchid" : "magenta")
+ elseif contains(compile_time, "less") && contains(allocs, "more") ||
+ contains(compile_time, "more") && contains(allocs, "less")
+ # mixed compilation and total allocations
+ color(writing_to_docs ? "silver" : "light_gray")
+ elseif contains(compile_time, "similar") &&
+ contains(allocs, "similar")
+ # similar compilation and total allocations
+ color(writing_to_docs ? "gray" : "dark_gray")
+ else
+ # worse compilation or total allocations
+ color(writing_to_docs ? "indianred" : "red")
+ end
+ else
+ # worse performance
+ color(writing_to_docs ? "indianred" : "red")
end
- no_difference =
- highlighter((data, i, j) -> true, generate_html ? "khaki" : "yellow")
+ highlighter = (writing_to_docs ? HtmlHighlighter : Highlighter)(
+ Returns(true),
+ (_, data, row, _) -> highlighter_color(data[row, 6:9]...),
+ )
+
+ # TODO: Why does Sys.maxrss() always seem to be 0 on Ubuntu systems?
+ has_rss = any(contains('['), table_data[:, 9])
other_kwargs =
- generate_html ?
+ writing_to_docs ?
(;
backend = Val(:html),
table_style = Dict(
@@ -53,38 +86,86 @@ function print_comparison_table(io = stdout, generate_html = false)
),
) :
(;
+ title,
+ title_alignment = :c,
title_same_width_as_table = true,
- columns_width = [45, 45, 0, 0, 0, 0, 0],
+ columns_width = [45, 45, 15, 10, 30, 25, 20, 20, has_rss ? 30 : 20],
linebreaks = true,
autowrap = true,
crop = :none,
)
+ if writing_to_docs
+ println(io, "## $title")
+ println(io, "```@raw html")
+ println(io, "