Separately track connecting fronts and do not clear them on new configs #50
Changes from 2 commits
@@ -21,6 +21,8 @@ import (
 	"github.com/getlantern/golog"
 	"github.com/getlantern/ops"
+
+	"github.com/alitto/pond/v2"
 )
 
 const (
@@ -144,6 +146,7 @@ func loadFronts(providers map[string]*Provider) sortedFronts {
 	fronts := make(sortedFronts, size)
 
+	// Note that map iteration order is random, so the order of the providers is automatically randomized.
 	index := 0
 	for key, p := range providers {
 		arr := p.Masquerades
@@ -209,38 +212,66 @@ func Vet(m *Masquerade, pool *x509.CertPool, testURL string) bool {
 	return masq.postCheck(conn, testURL)
 }
 
-// findWorkingFronts finds working domain fronts by vetting them in batches and in
-// parallel. Speed is of the essence here, as without working fronts, users will
+// findWorkingFronts finds working domain fronts by testing them using a worker pool. Speed
+// is of the essence here, as without working fronts, users will
 // be unable to fetch proxy configurations, particularly in the case of a first time
 // user who does not have proxies cached on disk.
 func (f *fronted) findWorkingFronts() {
-	// vet fronts in batches
-	const batchSize int = 40
 	// Keep looping through all fronts making sure we have working ones.
-	i := 0
 	for {
-		// Continually loop through the fronts in batches until we have 4 working ones,
-		// always looping around to the beginning if we reach the end.
+		// Continually loop through the fronts until we have 4 working ones.
 		// This is important, for example, when the user goes offline and all fronts start failing.
 		// We want to just keep trying in that case so that we find working fronts as soon as they
 		// come back online.
-		if f.connectingFronts.size() < 4 {
-			f.vetBatch(i, batchSize)
-			i = index(i, batchSize, f.frontSize())
+		if !f.hasEnoughWorkingFronts() {
+			// Note that trying all fronts takes awhile, as it only completes when we either
+			// have enough working fronts, or we've tried all of them.
+			log.Debug("findWorkingFronts::Trying all fronts")
+			f.tryAllFronts()
+			log.Debug("findWorkingFronts::Tried all fronts")
 		} else {
+			log.Debug("findWorkingFronts::Enough working fronts...sleeping")
 			select {
 			case <-f.stopCh:
-				log.Debug("Stopping parallel dialing")
+				log.Debug("findWorkingFronts::Stopping parallel dialing")
 				return
 			case <-time.After(time.Duration(rand.IntN(12000)) * time.Millisecond):
 				// Run again after a random time between 0 and 12 seconds
 			}
 		}
 	}
 }
 
-func index(i, batchSize, size int) int {
-	return (i + batchSize) % size
+func (f *fronted) tryAllFronts() {
+	// Vet fronts using a worker pool of 40 goroutines.
+	pool := pond.NewPool(40)
+
+	// Submit all fronts to the worker pool.
+	for i := 0; i < f.frontSize(); i++ {
+		i := i
+		m := f.frontAt(i)
+		pool.Submit(func() {
+			log.Debugf("Running task #%d with front %v", i, m.getIpAddress())
+			if f.hasEnoughWorkingFronts() {
+				// We have enough working fronts, so no need to continue.
+				log.Debug("Enough working fronts...ignoring task")
+				return
+			}
+			working := f.vetFront(m)
+			if working {
+				f.connectingFronts.onConnected(m)
+			} else {
+				m.markFailed()
+			}
+		})
+	}
+
+	// Stop the pool and wait for all submitted tasks to complete
+	pool.StopAndWait()
+}
Inline comment thread on tryAllFronts:

Reviewer: We should probably still stop testing fronts when we're stopped.

Author: OK, I just added a check in the funcs to see if we're stopped -- there's otherwise no good way to kill the extant workers.
+
+func (f *fronted) hasEnoughWorkingFronts() bool {
+	return f.connectingFronts.size() >= 4
 }
 
 func (f *fronted) frontSize() int {

@@ -255,24 +286,6 @@ func (f *fronted) frontAt(i int) Front {
 	return f.fronts[i]
 }
 
-func (f *fronted) vetBatch(start, batchSize int) {
-	log.Debugf("Vetting masquerade batch %d-%d", start, start+batchSize)
-	var wg sync.WaitGroup
-	for i := start; i < start+batchSize && i < f.frontSize(); i++ {
-		wg.Add(1)
-		go func(m Front) {
-			defer wg.Done()
-			working := f.vetFront(m)
-			if working {
-				f.connectingFronts.onConnected(m)
-			} else {
-				m.markFailed()
-			}
-		}(f.frontAt(i))
-	}
-	wg.Wait()
-}
-
 func (f *fronted) vetFront(m Front) bool {
 	conn, masqueradeGood, err := f.dialFront(m)
 	if err != nil {
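As a rough illustration of the stop check discussed in the inline comments above: each submitted task can poll a stop channel before doing any work, so extant workers drain quickly once a stop is requested. This is only a sketch, not the PR's actual change; the `vetter` type and `isStopped` helper are hypothetical names, while the pond calls (`NewPool`, `Submit`, `StopAndWait`) are the ones the diff itself uses.

```go
package main

import (
	"fmt"
	"time"

	"github.com/alitto/pond/v2"
)

// vetter is a hypothetical stand-in for the fronted struct; it carries only
// the stop channel needed to illustrate the early-exit check.
type vetter struct {
	stopCh chan struct{}
}

// isStopped is a hypothetical helper: a non-blocking poll of the stop channel.
func (v *vetter) isStopped() bool {
	select {
	case <-v.stopCh:
		return true
	default:
		return false
	}
}

func main() {
	v := &vetter{stopCh: make(chan struct{})}
	pool := pond.NewPool(4)

	for i := 0; i < 20; i++ {
		i := i
		pool.Submit(func() {
			// There is no way to forcibly kill a running goroutine, so each
			// task checks for itself and bails out once a stop is requested.
			if v.isStopped() {
				return
			}
			time.Sleep(100 * time.Millisecond) // simulate vetting one front
			fmt.Println("vetted candidate", i)
		})
	}

	// Request a stop partway through; tasks that check after this point
	// return immediately instead of doing more work.
	time.Sleep(250 * time.Millisecond)
	close(v.stopCh)

	pool.StopAndWait()
}
```

This mirrors the shape of the change described in the thread: the pool itself keeps running, but each task becomes a cheap no-op once the stop has been signalled.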
Author: This is also a key change @garmr-ulfr
Reviewer: So this is meant to run forever (unless stopped), ensuring that we always have at least 4 working fronts, if possible, correct? I like it!
Reviewer: Since we need to find 4 as soon as possible, it would be much faster to have X workers running independently instead of in a group. Right now, we can't vet the next batch until the entire current batch has been vetted. We could be waiting on just one front to time out even though the rest have already finished. Something like this:

(This isn't tested or anything and it might not be complete -- just an idea.)
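The reviewer's snippet itself is not reproduced in this view. Purely as a hedged illustration of the idea under discussion, the sketch below runs a fixed number of workers that pull candidates from a shared channel and stop doing real work once enough have been vetted. All names here (`front`, `vetFront`, `findWorkingFronts`, the worker and target counts) are illustrative stand-ins, not the repository's actual types or the reviewer's code.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// front is an illustrative stand-in for a domain-front candidate.
type front struct{ addr string }

// vetFront is a stand-in for the real vetting logic (dial, TLS check, post check).
func vetFront(f front) bool {
	return len(f.addr)%2 == 0
}

// findWorkingFronts fans candidates out to `workers` independent goroutines,
// so a single slow candidate only ever ties up one worker, not a whole batch.
func findWorkingFronts(candidates []front, workers, needed int) []front {
	var (
		mu      sync.Mutex
		working []front
		found   atomic.Int64
		wg      sync.WaitGroup
	)

	ch := make(chan front)
	for w := 0; w < workers; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for f := range ch {
				// Once we have enough, keep draining the channel cheaply so
				// the producer below never blocks, but skip the real work.
				if found.Load() >= int64(needed) {
					continue
				}
				if vetFront(f) {
					found.Add(1)
					mu.Lock()
					working = append(working, f)
					mu.Unlock()
				}
			}
		}()
	}

	for _, f := range candidates {
		ch <- f
	}
	close(ch)
	wg.Wait()
	return working
}

func main() {
	candidates := []front{{"a1"}, {"b22"}, {"c333"}, {"d4444"}, {"e55555"}, {"f666666"}}
	fmt.Println("working fronts:", findWorkingFronts(candidates, 3, 2))
}
```

The contrast with the batch-and-WaitGroup approach is the point of the comment: in a batch, the slowest member gates everyone, whereas independent workers move on as soon as they individually finish.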
Author: Ah, interesting -- so make sure X goroutines are always running. Gotta think about that, but I like the idea.
Author: The idea of just using workers instead of a waitgroup is interesting. My only hesitation is that the pattern tends to be that, if a front is going to fail, it's usually going to time out, in which case the entire batch just fails at the 5 second mark. That said, that's not always the case, and sometimes they fail right away on a cert mismatch or something, so I do agree with the change -- will look more at that today.