opam-version: "2.0"
authors: "Francois Berenger"
maintainer: "[email protected]"
homepage: "https://github.com/UnixJunkie/pardi"
bug-reports: "https://github.com/UnixJunkie/pardi/issues"
dev-repo: "git+https://github.com/UnixJunkie/pardi.git"
license: "GPL-1.0-or-later"
build: ["dune" "build" "-p" name "-j" jobs]
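# A local build sketch (illustrative; assumes opam 2.x and dune are installed):
#   opam install . --deps-only    # install the dependencies listed below
#   dune build -p pardi -j 4      # same command opam runs via the build field above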
depends: [
"dune" {>= "1.11"}
"batteries"
"dolog" {>= "4.0.0"}
"parany" {>= "11.0.0"}
"minicli" {>= "5.0.0"}
"ocaml" {>= "4.05.0"}
## only the dev. version needs these
# "lz4"
# "cryptokit"
# "zmq" {>= "5.0.0"}
]
synopsis: "Parallel execution of command lines, pardi!"
description: """
Almost like GNU parallel; just better.
Pardi pushes back the point at which you have to switch to a supercomputer.
Alternatively, it can be used on a supercomputer to make life there
much more fun and productive.
Put the fun back into computing: use pardi!
usage:
pardi ...
{-i|--input} <file>: where to read from
{-o|--output} <file>: where to write to (default=stdout)
[-s|--shell]: only shell commands in input file
[{-n|--nprocs} <int>]: max jobs in parallel (default=all cores)
[{-c|--chunks} <int>]: how many chunks per job (default=1)
[{-d|--demux} {l|b:<int>|r:<regexp>|s:<string>}]: how to cut input
file into chunks (line/bytes/regexp/sep_line; default=line)
{-w|--work} <string>: command to execute on each chunk
%IN and %OUT are special tokens
[{-m|--mux} {c|n}]: how to mux job results in output file
(cat/null; default=cat)
[{-ie|--input-ext} <string>]: append file extension to work input files
[{-oe|--output-ext} <string>]: append file extension to work output files
[{-p|--preserve}]: mux results while preserving input order
"""
# url {
# src: "https://github.com/UnixJunkie/pardi/archive/XXX.tar.gz"
# checksum: "md5=YYY"
# }