parallel.go
package testcontainers

import (
	"context"
	"fmt"
	"sync"
)

const (
	defaultWorkersCount = 8
)
// ParallelContainerRequest represents a list of container requests to be run in parallel.
type ParallelContainerRequest []GenericContainerRequest

// ParallelContainersOptions represents additional options for running containers in parallel.
type ParallelContainersOptions struct {
	WorkersCount int // number of parallel workers; if zero, defaultWorkersCount is used
}

// ParallelContainersRequestError represents an error coming from a single request of a parallel run.
type ParallelContainersRequestError struct {
	Request GenericContainerRequest
	Error   error
}

// ParallelContainersError aggregates the errors of all failed requests of a parallel run.
type ParallelContainersError struct {
	Errors []ParallelContainersRequestError
}

func (gpe ParallelContainersError) Error() string {
	return fmt.Sprintf("%v", gpe.Errors)
}
// parallelContainersRunner consumes requests from the requests channel, creates the
// corresponding containers, and publishes each result or error on the matching channel.
func parallelContainersRunner(
	ctx context.Context,
	requests <-chan GenericContainerRequest,
	errors chan<- ParallelContainersRequestError,
	containers chan<- Container,
	wg *sync.WaitGroup,
) {
	for req := range requests {
		c, err := GenericContainer(ctx, req)
		if err != nil {
			errors <- ParallelContainersRequestError{
				Request: req,
				Error:   err,
			}
			continue
		}
		containers <- c
	}
	wg.Done()
}
// ParallelContainers creates the given container requests and runs them in parallel.
// It returns the successfully created containers; if any request fails, it also
// returns a ParallelContainersError listing the failed requests.
func ParallelContainers(ctx context.Context, reqs ParallelContainerRequest, opt ParallelContainersOptions) ([]Container, error) {
	if opt.WorkersCount == 0 {
		opt.WorkersCount = defaultWorkersCount
	}

	tasksChanSize := opt.WorkersCount
	if tasksChanSize > len(reqs) {
		tasksChanSize = len(reqs)
	}

	tasksChan := make(chan GenericContainerRequest, tasksChanSize)
	errsChan := make(chan ParallelContainersRequestError)
	resChan := make(chan Container)
	waitRes := make(chan struct{})

	containers := make([]Container, 0)
	errors := make([]ParallelContainersRequestError, 0)

	wg := sync.WaitGroup{}
	wg.Add(tasksChanSize)

	// run workers
	for i := 0; i < tasksChanSize; i++ {
		go parallelContainersRunner(ctx, tasksChan, errsChan, resChan, &wg)
	}

	// collect results and errors until both channels have been closed
	go func() {
		for {
			select {
			case c, ok := <-resChan:
				if !ok {
					resChan = nil
				} else {
					containers = append(containers, c)
				}
			case e, ok := <-errsChan:
				if !ok {
					errsChan = nil
				} else {
					errors = append(errors, e)
				}
			}

			if resChan == nil && errsChan == nil {
				waitRes <- struct{}{}
				break
			}
		}
	}()

	// feed the workers, then wait for them to drain the queue
	for _, req := range reqs {
		tasksChan <- req
	}
	close(tasksChan)

	wg.Wait()
	close(resChan)
	close(errsChan)

	<-waitRes

	if len(errors) != 0 {
		return containers, ParallelContainersError{Errors: errors}
	}

	return containers, nil
}
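
// Usage sketch (not part of the original file): it shows how ParallelContainers
// might be called, assuming the function would live in a _test.go file of this
// package and that the "nginx:alpine" image can be pulled. The request fields
// used below (ContainerRequest, Image, ExposedPorts, Started) come from
// GenericContainerRequest; the worker count of 2 is arbitrary.
func ExampleParallelContainers() {
	ctx := context.Background()

	// Two identical requests, started as soon as they are created.
	reqs := ParallelContainerRequest{
		{
			ContainerRequest: ContainerRequest{
				Image:        "nginx:alpine",
				ExposedPorts: []string{"80/tcp"},
			},
			Started: true,
		},
		{
			ContainerRequest: ContainerRequest{
				Image:        "nginx:alpine",
				ExposedPorts: []string{"80/tcp"},
			},
			Started: true,
		},
	}

	containers, err := ParallelContainers(ctx, reqs, ParallelContainersOptions{WorkersCount: 2})
	if err != nil {
		// Even on error, the successfully created containers are returned.
		fmt.Println(err)
	}

	// Clean up whatever was started.
	for _, c := range containers {
		_ = c.Terminate(ctx)
	}
}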