-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtcp_bbr2plus.c
3274 lines (2907 loc) · 120 KB
/
tcp_bbr2plus.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/* BBR (Bottleneck Bandwidth and RTT) congestion control, v2
*
* BBRv2 is a model-based congestion control algorithm that aims for low
* queues, low loss, and (bounded) Reno/CUBIC coexistence. To maintain a model
* of the network path, it uses measurements of bandwidth and RTT, as well as
* (if they occur) packet loss and/or DCTCP/L4S-style ECN signals. Note that
* although it can use ECN or loss signals explicitly, it does not require
* either; it can bound its in-flight data based on its estimate of the BDP.
*
* The model has both higher and lower bounds for the operating range:
* lo: bw_lo, inflight_lo: conservative short-term lower bound
* hi: bw_hi, inflight_hi: robust long-term upper bound
* The bandwidth-probing time scale is (a) extended dynamically based on
* estimated BDP to improve coexistence with Reno/CUBIC; (b) bounded by
* an interactive wall-clock time-scale to be more scalable and responsive
* than Reno and CUBIC.
*
* Here is a state transition diagram for BBR:
*
* |
* V
* +---> STARTUP ----+
* | | |
* | V |
* | DRAIN ----+
* | | |
* | V |
* +---> PROBE_BW ----+
* | ^ | |
* | | | |
* | +----+ |
* | |
* +---- PROBE_RTT <--+
*
* A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
* When it estimates the pipe is full, it enters DRAIN to drain the queue.
* In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
* A long-lived BBR flow spends the vast majority of its time remaining
* (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
* in a fair manner, with a small, bounded queue. *If* a flow has been
* continuously sending for the entire min_rtt window, and hasn't seen an RTT
* sample that matches or decreases its min_rtt estimate for 10 seconds, then
* it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
* the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
* we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
* otherwise we enter STARTUP to try to fill the pipe.
*
* BBR is described in detail in:
* "BBR: Congestion-Based Congestion Control",
* Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
* Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
*
* There is a public e-mail list for discussing BBR development and testing:
* https://groups.google.com/forum/#!forum/bbr-dev
*
* NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled,
* otherwise TCP stack falls back to an internal pacing using one high
* resolution timer per TCP socket and may use more resources.
*/
#include <linux/module.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
#include <linux/inet.h>
#include <linux/random.h>
#include <linux/win_minmax.h>
/* Age the three tracked samples as time passes without a new minimum.
 * Local clone of the kernel's minmax_subwin_update(), used by the
 * running-min filter below.
 */
static u32 __local_minmax_subwin_update(struct minmax *m, u32 win,
					const struct minmax_sample *val)
{
	u32 elapsed = val->t - m->s[0].t;

	if (unlikely(elapsed > win)) {
		/* The best sample aged out of the window entirely: promote
		 * the 2nd and 3rd choices and adopt the new sample as the
		 * 3rd choice.  One extra promotion pass may be needed,
		 * because the promoted 2nd choice can be stale as well
		 * (the caller only verified the 3rd choice was in-window).
		 */
		m->s[0] = m->s[1];
		m->s[1] = m->s[2];
		m->s[2] = *val;
		if (unlikely(val->t - m->s[0].t > win)) {
			m->s[0] = m->s[1];
			m->s[1] = m->s[2];
			m->s[2] = *val;
		}
	} else if (unlikely(m->s[1].t == m->s[0].t) && elapsed > win / 4) {
		/* A quarter of the window passed without a fresh 2nd
		 * choice: take one from the window's second quarter.
		 */
		m->s[2] = m->s[1] = *val;
	} else if (unlikely(m->s[2].t == m->s[1].t) && elapsed > win / 2) {
		/* Half the window passed without a fresh 3rd choice:
		 * take one from the window's last half.
		 */
		m->s[2] = *val;
	}
	return m->s[0].v;
}
/* Feed one measurement into the windowed running-min filter.
 *
 * @m:    filter state (three time-stamped candidate minima)
 * @win:  window length, in the same time units as @t
 * @t:    time of this measurement
 * @meas: measured value
 *
 * Returns the current windowed minimum.  Checks whether the new
 * measurement updates the 1st, 2nd or 3rd choice min; otherwise it
 * ages the tracked samples via __local_minmax_subwin_update().
 *
 * Made static: this is a module-private clone of the kernel's
 * minmax_running_min() and must not leak into the global namespace.
 */
static u32 __local_minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas)
{
	struct minmax_sample val = { .t = t, .v = meas };

	if (unlikely(val.v <= m->s[0].v) ||	 /* found new min? */
	    unlikely(val.t - m->s[2].t > win))	 /* nothing left in window? */
		return minmax_reset(m, t, meas); /* forget earlier samples */

	if (unlikely(val.v <= m->s[1].v))
		m->s[2] = m->s[1] = val;
	else if (unlikely(val.v <= m->s[2].v))
		m->s[2] = val;

	return __local_minmax_subwin_update(m, win, &val);
}
/* DCTCP-style ECE handling (inlined here from the dctcp module).
 * Mirror the most-recent CE state into TCP_ECN_DEMAND_CWR: the flag is
 * set while the last received data carried a CE mark, cleared otherwise.
 */
static inline void dctcp_ece_ack_cwr(struct sock *sk, u32 ce_state)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 flags = tp->ecn_flags & ~TCP_ECN_DEMAND_CWR;

	if (ce_state == 1)
		flags |= TCP_ECN_DEMAND_CWR;
	tp->ecn_flags = flags;
}
/* Minimal DCTCP CE state machine:
 *
 * S:	0 <- last pkt was non-CE
 *	1 <- last pkt was CE
 *
 * When the CE state flips, force an immediate ACK so the sender learns
 * of the change promptly.  If an ACK is currently delayed, it is sent
 * first (with the *previous* CE state reflected in the ECE/CWR flags)
 * before the state is updated — the ordering below is deliberate.
 */
static inline void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
					u32 *prior_rcv_nxt, u32 *ce_state)
{
	u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;

	if (*ce_state != new_ce_state) {
		/* CE state has changed, force an immediate ACK to
		 * reflect the new CE state. If an ACK was delayed,
		 * send that first to reflect the prior CE state.
		 */
		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
			dctcp_ece_ack_cwr(sk, *ce_state);
			__tcp_send_ack(sk, *prior_rcv_nxt);
		}
		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
	}
	/* Record the ack boundary and new state, then sync the CWR flag. */
	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
	*ce_state = new_ce_state;
	dctcp_ece_ack_cwr(sk, new_ce_state);
}
/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
 * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
 * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
 * Since the minimum window is >=4 packets, the lower bound isn't
 * an issue. The upper bound isn't an issue with existing technologies.
 */
#define BW_SCALE 24
#define BW_UNIT (1 << BW_SCALE)

/* Fixed-point scaling for dimensionless fractions (gains, thresholds):
 * BBR_UNIT == 1.0, so e.g. BBR_UNIT * 5 / 4 encodes a gain of 1.25.
 */
#define BBR_SCALE 8	/* scaling factor for fractions in BBR (e.g. gains) */
#define BBR_UNIT (1 << BBR_SCALE)

/* Bits for the bbr_flags debug module parameter: */
#define FLAG_DEBUG_VERBOSE	0x1	/* Verbose debugging messages */
#define FLAG_DEBUG_LOOPBACK	0x2	/* Do NOT skip loopback addr */

#define CYCLE_LEN		8	/* number of phases in a pacing gain cycle */
/* BBR has the following modes for deciding how fast to send (stored in
 * bbr->mode, a 3-bit field):
 */
enum bbr_mode {
	BBR_STARTUP,	/* ramp up sending rate rapidly to fill pipe */
	BBR_DRAIN,	/* drain any queue created during startup */
	BBR_PROBE_BW,	/* discover, share bw: pace around estimated bw */
	BBR_PROBE_RTT,	/* cut inflight to min to probe min_rtt */
};
/* How does the incoming ACK stream relate to our bandwidth probing?
 * (stored in bbr->ack_phase, a 3-bit field)
 */
enum bbr_ack_phase {
	BBR_ACKS_INIT,		  /* not probing; not getting probe feedback */
	BBR_ACKS_REFILLING,	  /* sending at est. bw to fill pipe */
	BBR_ACKS_PROBE_STARTING,  /* inflight rising to probe bw */
	BBR_ACKS_PROBE_FEEDBACK,  /* getting feedback from bw probing */
	BBR_ACKS_PROBE_STOPPING,  /* stopped probing; still getting feedback */
};
/* BBR congestion control block.  Lives in the congestion-control private
 * area of the socket (inet_csk_ca(sk)); see the ICSK_CA_PRIV_SIZE note
 * before the bbr2plus sub-struct below.  Bitfield widths are load-bearing:
 * they bound the legal range of each tunable.
 */
struct bbr {
	u32	min_rtt_us;		/* min RTT in min_rtt_win_sec window */
	u32	min_rtt_stamp;		/* timestamp of min_rtt_us */
	u32	probe_rtt_done_stamp;	/* end time for BBR_PROBE_RTT mode */
	u32	probe_rtt_min_us;	/* min RTT in bbr_probe_rtt_win_ms window */
	u32	probe_rtt_min_stamp;	/* timestamp of probe_rtt_min_us */
	u32	next_rtt_delivered;	/* scb->tx.delivered at end of round */
	u32	prior_rcv_nxt;		/* tp->rcv_nxt when CE state last changed */
	u64	cycle_mstamp;		/* time of this cycle phase start */
	u32	mode:3,			/* current bbr_mode in state machine */
		prev_ca_state:3,	/* CA state on previous ACK */
		packet_conservation:1,	/* use packet conservation? */
		round_start:1,		/* start of packet-timed tx->ack round? */
		ce_state:1,		/* If most recent data has CE bit set */
		bw_probe_up_rounds:5,	/* cwnd-limited rounds in PROBE_UP */
		try_fast_path:1,	/* can we take fast path? */
		unused2:11,
		idle_restart:1,		/* restarting after idle? */
		probe_rtt_round_done:1,	/* a BBR_PROBE_RTT round at 4 pkts? */
		cycle_idx:3,		/* current index in pacing_gain cycle array */
		has_seen_rtt:1;		/* have we seen an RTT sample yet? */
	u32	pacing_gain:11,		/* current gain for setting pacing rate */
		cwnd_gain:11,		/* current gain for setting cwnd */
		full_bw_reached:1,	/* reached full bw in Startup? */
		full_bw_cnt:2,		/* number of rounds without large bw gains */
		init_cwnd:7;		/* initial cwnd */
	u32	prior_cwnd;		/* prior cwnd upon entering loss recovery */
	u32	full_bw;		/* recent bw, to estimate if pipe is full */
	/* For tracking ACK aggregation: */
	u64	ack_epoch_mstamp;	/* start of ACK sampling epoch */
	u16	extra_acked[2];		/* max excess data ACKed in epoch */
	u32	ack_epoch_acked:20,	/* packets (S)ACKed in sampling epoch */
		extra_acked_win_rtts:5,	/* age of extra_acked, in round trips */
		extra_acked_win_idx:1,	/* current index in extra_acked array */
	/* BBR v2 state: */
		unused1:2,
		startup_ecn_rounds:2,	/* consecutive hi ECN STARTUP rounds */
		loss_in_cycle:1,	/* packet loss in this cycle? */
		ecn_in_cycle:1;		/* ECN in this cycle? */
	u32	loss_round_delivered;	/* scb->tx.delivered ending loss round */
	u32	undo_bw_lo;		/* bw_lo before latest losses */
	u32	undo_inflight_lo;	/* inflight_lo before latest losses */
	u32	undo_inflight_hi;	/* inflight_hi before latest losses */
	u32	bw_latest;		/* max delivered bw in last round trip */
	u32	bw_lo;			/* lower bound on sending bandwidth */
	u32	bw_hi[2];		/* upper bound of sending bandwidth range */
	u32	inflight_latest;	/* max delivered data in last round trip */
	u32	inflight_lo;		/* lower bound of inflight data range */
	u32	inflight_hi;		/* upper bound of inflight data range */
	u32	bw_probe_up_cnt;	/* packets delivered per inflight_hi incr */
	u32	bw_probe_up_acks;	/* packets (S)ACKed since inflight_hi incr */
	u32	probe_wait_us;		/* PROBE_DOWN until next clock-driven probe */
	u32	ecn_eligible:1,		/* sender can use ECN (RTT, handshake)? */
		ecn_alpha:9,		/* EWMA delivered_ce/delivered; 0..256 */
		bw_probe_samples:1,	/* rate samples reflect bw probing? */
		prev_probe_too_high:1,	/* did last PROBE_UP go too high? */
		stopped_risky_probe:1,	/* last PROBE_UP stopped due to risk? */
		rounds_since_probe:8,	/* packet-timed rounds since probed bw */
		loss_round_start:1,	/* loss_round_delivered round trip? */
		loss_in_round:1,	/* loss marked in this round trip? */
		ecn_in_round:1,		/* ECN marked in this round trip? */
		ack_phase:3,		/* bbr_ack_phase: meaning of ACKs */
		loss_events_in_round:4,	/* losses in STARTUP round */
		initialized:1;		/* has bbr_init() been called? */
	u32	alpha_last_delivered;	 /* tp->delivered at alpha update */
	u32	alpha_last_delivered_ce; /* tp->delivered_ce at alpha update */
	/* Params configurable using setsockopt. Refer to corresponding
	 * module param for detailed description of params.
	 */
	struct bbr_params {
		u32	high_gain:11,		/* max allowed value: 2047 */
			drain_gain:10,		/* max allowed value: 1023 */
			cwnd_gain:11;		/* max allowed value: 2047 */
		u32	cwnd_min_target:4,	/* max allowed value: 15 */
			min_rtt_win_sec:5,	/* max allowed value: 31 */
			probe_rtt_mode_ms:9,	/* max allowed value: 511 */
			full_bw_cnt:3,		/* max allowed value: 7 */
			bw_rtts:5,		/* max allowed value: 31 */
			cwnd_tso_budget:1,	/* allowed values: {0, 1} */
			unused3:1,
			drain_to_target:1,	/* boolean */
			precise_ece_ack:1,	/* boolean */
			extra_acked_in_startup:1, /* allowed values: {0, 1} */
			fast_path:1;		/* boolean */
		u32	full_bw_thresh:10,	/* max allowed value: 1023 */
			startup_cwnd_gain:11,	/* max allowed value: 2047 */
			bw_probe_pif_gain:9,	/* max allowed value: 511 */
			usage_based_cwnd:1,	/* boolean */
			unused2:1;
		u16	probe_rtt_win_ms:14,	/* max allowed value: 16383 */
			refill_add_inc:2;	/* max allowed value: 3 */
		u16	extra_acked_gain:11,	/* max allowed value: 2047 */
			extra_acked_win_rtts:5;	/* max allowed value: 31 */
		u16	pacing_gain[CYCLE_LEN];	/* max allowed value: 1023 */
		/* Mostly BBR v2 parameters below here: */
		u32	ecn_alpha_gain:8,	/* max allowed value: 255 */
			ecn_factor:8,		/* max allowed value: 255 */
			ecn_thresh:8,		/* max allowed value: 255 */
			beta:8;			/* max allowed value: 255 */
		u32	ecn_max_rtt_us:19,	/* max allowed value: 524287 */
			bw_probe_reno_gain:9,	/* max allowed value: 511 */
			full_loss_cnt:4;	/* max allowed value: 15 */
		u32	probe_rtt_cwnd_gain:8,	/* max allowed value: 255 */
			inflight_headroom:8,	/* max allowed value: 255 */
			loss_thresh:8,		/* max allowed value: 255 */
			bw_probe_max_rounds:8;	/* max allowed value: 255 */
		u32	bw_probe_rand_rounds:4,	/* max allowed value: 15 */
			bw_probe_base_us:26,	/* usecs: 0..2^26-1 (67 secs) */
			full_ecn_cnt:2;		/* max allowed value: 3 */
		u32	bw_probe_rand_us:26,	/* usecs: 0..2^26-1 (67 secs) */
			undo:1,			/* boolean */
			tso_rtt_shift:4,	/* max allowed value: 15 */
			unused5:1;
		u32	ecn_reprobe_gain:9,	/* max allowed value: 511 */
			unused1:14,
			ecn_alpha_init:9;	/* max allowed value: 256 */
	} params;
	struct {
		u32	snd_isn;	/* Initial sequence number */
		u32	rs_bw;		/* last valid rate sample bw */
		u32	target_cwnd;	/* target cwnd, based on BDP */
		u8	undo:1,		/* Undo even happened but not yet logged */
			unused:7;
		char	event;		/* single-letter event debug codes */
		u16	unused2;
	} debug;
	/* BBRv2+ per-connection state.  NOTE: ICSK_CA_PRIV_SIZE is changed
	 * to 400 (in the patched kernel headers) to make room for these
	 * new vars; this struct must not grow past that budget.
	 */
	struct {
		struct minmax rc_min_rtt_us;	/* windowed min RTT (rtt-comp) */
		struct minmax max_jitter_us;	/* windowed max RTT jitter */
		u32 ever_measured_mrtt;
		u32 rtt_comp_thresh;
		u32 last_round_srtt_us;
		u32 curr_round_srtt_us;
		u32 last_round_min_rtt_us;
		u32 curr_round_min_rtt_us;
		u32 mrtt_in_cruise;
		u32 last_mrtt_in_cruise;
		u32 cruise_round_of_mrtt_change;
		int srtt_rnd_cnt;
		/* u32 bw_before_probe; */
		u32 mrtt_before_probe;
		u32 rtt_cnt;
		int probe_wait_round;
		u32 rounds_since_last_advancing_bw_filter;
		int rtt_sample_cnt;
	} bbr2plus;
	/* Per-connection snapshot of the bbr2p_* module parameters below. */
	struct {
		u32 switch_to_bbr2_thresh;
		u32 switch_to_bbr2p_thresh;
		u32 mode_switch_rtt_low_thresh;
		u32 mode_switch_rtt_high_thresh;
		u32 rc_min_rtt_win_sz;
		int rtt_comp_rtt_var_thresh;	/* R */
		int rtt_comp_startup_thresh;	/* alpha */
		int rtt_comp_thresh;		/* alpha */
		int fast_conv_probe_cycle_base;
		int fast_conv_probe_cycle_random;
		int fast_conv_srtt_round;
		int fast_conv_rtt_thresh;	/* 1.25 */
		int fast_conv_preup_rtt_thresh;
		u32 fast_conv_rtt_error_us;
		u32 fast_conv_probe_again_thresh; /* 1.20 */
		int rtt_comp_on;
		int fast_conv_on;
		bool copa_style;
		u32 fast_conv_rounds_to_advance_bw_filter;
		u32 rtt_comp_jitter_win_sz;
	} bbr2plus_params;
};
/* Scratch state passed between the per-ACK helper functions. */
struct bbr_context {
	u32 sample_bw;		/* bw of the latest valid rate sample */
	u32 target_cwnd;	/* cwnd target computed from BDP this ACK */
	u32 log:1;		/* emit a debug log entry for this ACK? */
};
/* BBRv2+ parameters */
static int bbr2p_rc_min_rtt_win_sz = 4;	/* similar to RTprop */
/* max_rtt - min_rtt >= (alpha - 1) * min_rtt */
static int bbr2p_rtt_comp_rtt_var_thresh = BBR_UNIT * 4 / 10;
static int bbr2p_rtt_comp_startup_thresh = BBR_UNIT * 2885 / 1000 + 1; /* similar to startup high gain (Startup) */
static int bbr2p_rtt_comp_thresh = 2 * BBR_UNIT; /* similar to normal gain */
/* static int bbr2p_rtt_comp_cwnd_factor = BBR_UNIT * 2 / 2; */ /* obsolete */
static int bbr2p_rtt_comp_jitter_win_sz = 4; /* max_filter for jitters */
static int bbr2p_fast_conv_probe_cycle_base = 8; /* match BBR's probing freq */
static int bbr2p_fast_conv_probe_cycle_random = 4; /* small randomized component to avoid flow sync */
static int bbr2p_fast_conv_srtt_round = 1; /* fixed at the moment */
static int bbr2p_fast_conv_rtt_thresh = BBR_UNIT * 11 / 10; /* default: 1.1 */
static int bbr2p_fast_conv_preup_rtt_thresh = BBR_UNIT * 2 / 100; /* gamma, typical Internet RTT: 20ms -- 300ms, 2% -> 0.4ms ~ 6ms */
static u32 bbr2p_fast_conv_rtt_error_us = 2000;
static u32 bbr2p_fast_conv_probe_again_thresh = BBR_UNIT * 2 / 100; /* gamma */
static u32 bbr2p_fast_conv_rounds_to_advance_bw_filter = 25; /* 25 RTT */
static int bbr2p_rtt_comp_on = 1;
static int bbr2p_fast_conv_on = 1;
static bool bbr2p_collect_measurements = false;
static bool bbr2p_copa_style = false;
static u32 bbr2p_switch_to_bbr2_thresh = 2;  /* 0 disables */
static u32 bbr2p_switch_to_bbr2p_thresh = 4; /* 0 disables */
static u32 bbr2p_mode_switch_rtt_low_thresh = BBR_UNIT * 1 / 20;
static u32 bbr2p_mode_switch_rtt_high_thresh = BBR_UNIT * 1 / 10;

/* Export BBRv2+ parameters.
 *
 * Fix: module_param_named()'s type argument must match the variable's
 * C type — __param_check() compiles a pointer-compatibility check, so
 * registering a u32 as "int" produces an incompatible-pointer-types
 * build error. All u32 knobs below are now registered as "uint".
 */
module_param_named(bbr2plus_rc_min_rtt_win_sz, bbr2p_rc_min_rtt_win_sz, int, 0644);
module_param_named(bbr2plus_rtt_comp_startup_thresh, bbr2p_rtt_comp_startup_thresh, int, 0644);
module_param_named(bbr2plus_rtt_comp_rtt_var_thresh, bbr2p_rtt_comp_rtt_var_thresh, int, 0644);
module_param_named(bbr2plus_rtt_comp_thresh, bbr2p_rtt_comp_thresh, int, 0644);
/* module_param_named(bbr2plus_rtt_comp_cwnd_factor, bbr2p_rtt_comp_cwnd_factor, int, 0644); */
module_param_named(bbr2plus_rtt_comp_jitter_win_sz, bbr2p_rtt_comp_jitter_win_sz, int, 0644);
module_param_named(bbr2plus_fast_conv_probe_cycle_base, bbr2p_fast_conv_probe_cycle_base, int, 0644);
module_param_named(bbr2plus_fast_conv_probe_cycle_random, bbr2p_fast_conv_probe_cycle_random, int, 0644);
module_param_named(bbr2plus_fast_conv_srtt_round, bbr2p_fast_conv_srtt_round, int, 0644);
module_param_named(bbr2plus_fast_conv_rtt_thresh, bbr2p_fast_conv_rtt_thresh, int, 0644);
module_param_named(bbr2plus_fast_conv_preup_rtt_thresh, bbr2p_fast_conv_preup_rtt_thresh, int, 0644);
module_param_named(bbr2plus_fast_conv_rtt_error_us, bbr2p_fast_conv_rtt_error_us, uint, 0644);
module_param_named(bbr2plus_fast_conv_probe_again_thresh, bbr2p_fast_conv_probe_again_thresh, uint, 0644);
module_param_named(bbr2plus_rtt_comp_on, bbr2p_rtt_comp_on, int, 0644);
module_param_named(bbr2plus_fast_conv_on, bbr2p_fast_conv_on, int, 0644);
module_param_named(bbr2plus_collect_measurements, bbr2p_collect_measurements, bool, 0644);
module_param_named(bbr2plus_copa_style, bbr2p_copa_style, bool, 0644);
module_param_named(bbr2plus_fast_conv_rounds_to_advance_bw_filter, bbr2p_fast_conv_rounds_to_advance_bw_filter, uint, 0644);
module_param_named(bbr2plus_switch_to_bbr2_thresh, bbr2p_switch_to_bbr2_thresh, uint, 0644);
module_param_named(bbr2plus_switch_to_bbr2p_thresh, bbr2p_switch_to_bbr2p_thresh, uint, 0644);
module_param_named(bbr2plus_mode_switch_rtt_low_thresh, bbr2p_mode_switch_rtt_low_thresh, uint, 0644);
module_param_named(bbr2plus_mode_switch_rtt_high_thresh, bbr2p_mode_switch_rtt_high_thresh, uint, 0644);
/* Window length of bw filter (in rounds). Max allowed value is 31 (0x1F) */
static int bbr_bw_rtts = CYCLE_LEN + 2;
/* Window length of min_rtt filter (in sec). Max allowed value is 31 (0x1F) */
static u32 bbr_min_rtt_win_sec = 10;
/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode.
 * Max allowed value is 511 (0x1FF).
 */
static u32 bbr_probe_rtt_mode_ms = 200;
/* Window length of probe_rtt_min_us filter (in ms), and consequently the
 * typical interval between PROBE_RTT mode entries.
 * Note that bbr_probe_rtt_win_ms must be <= bbr_min_rtt_win_sec * MSEC_PER_SEC
 */
static u32 bbr_probe_rtt_win_ms = 5000;
/* Skip TSO below the following bandwidth (bits/sec): */
static int bbr_min_tso_rate = 1200000;
/* Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
 * in bigger TSO bursts. By default we cut the RTT-based allowance in half
 * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
 * is below 1500 bytes after 6 * ~500 usec = 3ms.
 */
static u32 bbr_tso_rtt_shift = 9;  /* halve allowance per 2^9 usecs, 512us */
/* Select cwnd TSO budget approach:
 *  0: padding
 *  1: flooring
 */
static uint bbr_cwnd_tso_budget = 1;
/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
 * In order to help drive the network toward lower queues and low latency while
 * maintaining high utilization, the average pacing rate aims to be slightly
 * lower than the estimated bandwidth. This is an important aspect of the
 * design.
 */
static const int bbr_pacing_margin_percent = 1;
/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
 * that will allow a smoothly increasing pacing rate that will double each RTT
 * and send the same number of packets per RTT that an un-paced, slow-starting
 * Reno or CUBIC flow would. Max allowed value is 2047 (0x7FF).
 */
static int bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;
/* The gain for deriving startup cwnd. Max allowed value is 2047 (0x7FF). */
static int bbr_startup_cwnd_gain = BBR_UNIT * 2885 / 1000 + 1;
/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
 * the queue created in BBR_STARTUP in a single round. Max allowed value
 * is 1023 (0x3FF).
 */
static int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs.
 * Max allowed value is 2047 (0x7FF).
 */
static int bbr_cwnd_gain = BBR_UNIT * 2;
/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw.
 * Max allowed value for each element is 1023 (0x3FF).
 * These are indices into bbr_pacing_gain[] below; the BBRv2+ entries
 * extend the v2 cycle with an extra probe-negotiation sub-cycle.
 */
enum bbr_pacing_gain_phase {
	BBR_BW_PROBE_UP = 0,	 /* push up inflight to probe for bw/vol */
	BBR_BW_PROBE_DOWN = 1,	 /* drain excess inflight from the queue */
	BBR_BW_PROBE_CRUISE = 2, /* use pipe, w/ headroom in queue/pipe */
	BBR_BW_PROBE_REFILL = 3, /* v2: refill the pipe again to 100% */
	BBR_BW_PROBE_PRE_UP = 4, /* BBRv2+: try if we can really probe more BW. the first RTT of try */
	BBR_BW_PROBE_GUARD = 5,	 /* BBRv2+: wait one RTT to get feedbacks from PRE_UP. the second RTT of try */
	BBR_BW_PROBE_POST_UP = 6, /* BBRv2+: wait one RTT to get bw measurements from UP (deemed as a part of probeup) */
	BBR_BW_PROBE_DOWN_SLIGHTLY = 7, /* BBRv2+: similar to probe_down, but with a bigger gain */
};
/* Updated gain array, indexed by enum bbr_pacing_gain_phase above. */
static int bbr_pacing_gain[] = {
	BBR_UNIT * 5 / 4,	/* UP: probe for more available bw */
	BBR_UNIT * 3 / 4,	/* DOWN: drain queue / yield bw to other flows */
	BBR_UNIT, BBR_UNIT, BBR_UNIT * 110 / 100, /* CRUISE, REFILL, PRE_UP */
	BBR_UNIT, BBR_UNIT, BBR_UNIT * 90 / 100,  /* GUARD, POST_UP, DOWN_SLIGHTLY */
};
/* Randomize the starting gain cycling phase over N phases: */
static u32 bbr_cycle_rand = 7;
/* Try to keep at least this many packets in flight, if things go smoothly. For
 * smooth functioning, a sliding window protocol ACKing every other packet
 * needs at least 4 packets in flight. Max allowed value is 15 (0xF).
 */
static u32 bbr_cwnd_min_target = 4;
/* Cwnd to BDP proportion in PROBE_RTT mode scaled by BBR_UNIT. Default: 50%.
 * Use 0 to disable. Max allowed value is 255.
 */
static u32 bbr_probe_rtt_cwnd_gain = BBR_UNIT * 1 / 2;
/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
/* If bw has increased significantly (1.25x), there may be more bw available.
 * Max allowed value is 1023 (0x3FF).
 */
static u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
/* But after 3 rounds w/o significant bw growth, estimate pipe is full.
 * Max allowed value is 7 (0x7).
 */
static u32 bbr_full_bw_cnt = 3;
static u32 bbr_flags;	/* Debugging related stuff (FLAG_DEBUG_* bits) */
/* Whether to debug using printk.
 */
static bool bbr_debug_with_printk;
/* Whether to debug using ftrace event tcp:tcp_bbr_event.
 * Ignored when bbr_debug_with_printk is set.
 */
static bool bbr_debug_ftrace;
/* Experiment: each cycle, try to hold sub-unity gain until inflight <= BDP. */
static bool bbr_drain_to_target = true;	/* default: enabled */
/* Experiment: Flags to control BBR with ECN behavior.
 */
static bool bbr_precise_ece_ack = true;	/* default: enabled */
/* The max rwin scaling shift factor is 14 (RFC 1323), so the max sane rwin is
 * (2^(16+14) B)/(1024 B/packet) = 1M packets.
 */
static u32 bbr_cwnd_warn_val = 1U << 20;
static u16 bbr_debug_port_mask;
/* BBR module parameters. These are module parameters only in Google prod.
 * Upstream these are intentionally not module parameters.
 */
static int bbr_pacing_gain_size = CYCLE_LEN;
/* Gain factor for adding extra_acked to target cwnd: */
static int bbr_extra_acked_gain = 256;
/* Window length of extra_acked window. Max allowed val is 31. */
static u32 bbr_extra_acked_win_rtts = 5;
/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
static u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
/* Time period for clamping cwnd increment due to ack aggregation */
static u32 bbr_extra_acked_max_us = 100 * 1000;
/* Use extra acked in startup ?
 * 0: disabled
 * 1: use latest extra_acked value from 1-2 rtt in startup
 */
static int bbr_extra_acked_in_startup = 1;	/* default: enabled */
/* Experiment: don't grow cwnd beyond twice of what we just probed. */
static bool bbr_usage_based_cwnd;	/* default: disabled */
/* For lab testing, researchers can enable BBRv2 ECN support with this flag,
 * when they know that any ECN marks that the connections experience will be
 * DCTCP/L4S-style ECN marks, rather than RFC3168 ECN marks.
 * TODO(ncardwell): Production use of the BBRv2 ECN functionality depends on
 * negotiation or configuration that is outside the scope of the BBRv2
 * alpha release.
 */
static bool bbr_ecn_enable = false;
/* Export BBR v1/v2 tunables as module parameters.
 * Fix: bbr_tso_rtt_shift is a u32, so it must be registered as "uint"
 * (module_param type checking rejects an int/u32 pointer mismatch).
 */
module_param_named(bw_rtts, bbr_bw_rtts, int, 0644);
module_param_named(min_tso_rate, bbr_min_tso_rate, int, 0644);
module_param_named(tso_rtt_shift, bbr_tso_rtt_shift, uint, 0644);
module_param_named(high_gain, bbr_high_gain, int, 0644);
module_param_named(drain_gain, bbr_drain_gain, int, 0644);
module_param_named(startup_cwnd_gain, bbr_startup_cwnd_gain, int, 0644);
module_param_named(cwnd_gain, bbr_cwnd_gain, int, 0644);
module_param_array_named(pacing_gain, bbr_pacing_gain, int,
			 &bbr_pacing_gain_size, 0644);
module_param_named(cycle_rand, bbr_cycle_rand, uint, 0644);
module_param_named(cwnd_min_target, bbr_cwnd_min_target, uint, 0644);
module_param_named(probe_rtt_cwnd_gain,
		   bbr_probe_rtt_cwnd_gain, uint, 0664);
module_param_named(cwnd_warn_val, bbr_cwnd_warn_val, uint, 0664);
module_param_named(debug_port_mask, bbr_debug_port_mask, ushort, 0644);
module_param_named(flags, bbr_flags, uint, 0644);
module_param_named(debug_ftrace, bbr_debug_ftrace, bool, 0644);
module_param_named(debug_with_printk, bbr_debug_with_printk, bool, 0644);
module_param_named(min_rtt_win_sec, bbr_min_rtt_win_sec, uint, 0644);
module_param_named(probe_rtt_mode_ms, bbr_probe_rtt_mode_ms, uint, 0644);
module_param_named(probe_rtt_win_ms, bbr_probe_rtt_win_ms, uint, 0644);
module_param_named(full_bw_thresh, bbr_full_bw_thresh, uint, 0644);
module_param_named(full_bw_cnt, bbr_full_bw_cnt, uint, 0644);
/* NOTE: the "bduget" typo below is user-visible sysfs ABI; renaming it
 * would break existing configurations, so it is kept as-is.
 */
module_param_named(cwnd_tso_bduget, bbr_cwnd_tso_budget, uint, 0664);
module_param_named(extra_acked_gain, bbr_extra_acked_gain, int, 0664);
module_param_named(extra_acked_win_rtts,
		   bbr_extra_acked_win_rtts, uint, 0664);
module_param_named(extra_acked_max_us,
		   bbr_extra_acked_max_us, uint, 0664);
module_param_named(ack_epoch_acked_reset_thresh,
		   bbr_ack_epoch_acked_reset_thresh, uint, 0664);
module_param_named(drain_to_target, bbr_drain_to_target, bool, 0664);
module_param_named(precise_ece_ack, bbr_precise_ece_ack, bool, 0664);
module_param_named(extra_acked_in_startup,
		   bbr_extra_acked_in_startup, int, 0664);
module_param_named(usage_based_cwnd, bbr_usage_based_cwnd, bool, 0664);
module_param_named(ecn_enable, bbr_ecn_enable, bool, 0664);

/* Forward declarations for helpers defined later in this file. */
static void bbr2_exit_probe_rtt(struct sock *sk);
static void bbr2_reset_congestion_signals(struct sock *sk);
static void bbr_check_probe_rtt_done(struct sock *sk);
/* Do we estimate that STARTUP filled the pipe?  Reads the flag latched
 * by the full-bandwidth detection logic (bbr->full_bw_reached).
 */
static bool bbr_full_bw_reached(const struct sock *sk)
{
	const struct bbr *bbr = inet_csk_ca(sk);

	return bbr->full_bw_reached;
}
/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE.
 * bw_hi[] holds the two most recent per-window maxima; the estimate is
 * the larger of the pair.
 */
static u32 bbr_max_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 hi0 = bbr->bw_hi[0], hi1 = bbr->bw_hi[1];

	return hi0 > hi1 ? hi0 : hi1;
}
/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE:
 * the windowed max sample, capped by the short-term lower bound bw_lo.
 */
static u32 bbr_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bw = bbr_max_bw(sk);

	if (bbr->bw_lo < bw)
		bw = bbr->bw_lo;
	return bw;
}
/* Return maximum extra acked in past k-2k round trips,
 * where k = bbr_extra_acked_win_rtts: the larger of the two
 * per-window maxima tracked in bbr->extra_acked[].
 */
static u16 bbr_extra_acked(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u16 a = bbr->extra_acked[0], b = bbr->extra_acked[1];

	return a > b ? a : b;
}
/* Return rate in bytes per second, optionally with a gain.
 * The order here is chosen carefully to avoid overflow of u64. This should
 * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
 *
 * @rate:   bw in pkts/uS << BW_SCALE
 * @gain:   multiplier in BBR_SCALE fixed point (BBR_UNIT == 1.0)
 * @margin: percentage shaved off the result (e.g. 1 => pace 1% below bw)
 *
 * NOTE: the multiply/shift sequence must not be reordered: each step is
 * sized so intermediate u64 products stay below 2^64.
 */
static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain,
				  int margin)
{
	unsigned int mss = tcp_sk(sk)->mss_cache;

	rate *= mss;			/* pkts -> bytes */
	rate *= gain;
	rate >>= BBR_SCALE;		/* strip gain fixed-point scaling */
	rate *= USEC_PER_SEC / 100 * (100 - margin);
	rate >>= BW_SCALE;		/* strip bw fixed-point scaling */
	rate = max(rate, 1ULL);		/* never pace at a rate of zero */
	return rate;
}
/* Convert a bw sample to bytes/sec with unit gain and no pacing margin. */
static u64 bbr_bw_bytes_per_sec(struct sock *sk, u64 rate)
{
	const int no_margin = 0;

	return bbr_rate_bytes_per_sec(sk, rate, BBR_UNIT, no_margin);
}
/* Convert a bw sample to kilobits/sec (used only for debug output). */
static u64 bbr_rate_kbps(struct sock *sk, u64 rate)
{
	u64 kbps = bbr_bw_bytes_per_sec(sk, rate) * 8; /* bytes/s -> bits/s */

	do_div(kbps, 1000);	/* bits/s -> kbits/s; do_div is 32-bit safe */
	return kbps;
}
static u32 bbr_tso_segs_goal(struct sock *sk); /* fwd decl: defined below */
/* Emit a one-line debug record of per-ACK BBR state, gated by the
 * bbr_debug_* module parameters, SOCK_DBG, and/or a destination-port mask.
 * Also WARN_ONCE if cwnd is zero or implausibly large. Pure diagnostics:
 * no congestion-control state is modified here (except clearing debug.undo).
 */
static void bbr_debug(struct sock *sk, u32 acked,
const struct rate_sample *rs, struct bbr_context *ctx)
{
static const char ca_states[] = {
[TCP_CA_Open] = 'O',
[TCP_CA_Disorder] = 'D',
[TCP_CA_CWR] = 'C',
[TCP_CA_Recovery] = 'R',
[TCP_CA_Loss] = 'L',
};
static const char mode[] = {
'G', /* Growing - BBR_STARTUP */
'D', /* Drain - BBR_DRAIN */
'W', /* Window - BBR_PROBE_BW */
'M', /* Min RTT - BBR_PROBE_RTT */
};
static const char ack_phase[] = { /* bbr_ack_phase strings */
'I', /* BBR_ACKS_INIT - 'Init' */
'R', /* BBR_ACKS_REFILLING - 'Refilling' */
'B', /* BBR_ACKS_PROBE_STARTING - 'Before' */
'F', /* BBR_ACKS_PROBE_FEEDBACK - 'Feedback' */
'A', /* BBR_ACKS_PROBE_STOPPING - 'After' */
};
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
/* Sequence numbers are printed relative to the initial send sequence. */
const u32 una = tp->snd_una - bbr->debug.snd_isn;
const u32 fack = tcp_highest_sack_seq(tp);
const u16 dport = ntohs(inet_sk(sk)->inet_dport);
/* Port matches when the masked bits of the destination port are zero. */
bool is_port_match = (bbr_debug_port_mask &&
((dport & bbr_debug_port_mask) == 0));
char debugmsg[320];
if (sk->sk_state == TCP_SYN_SENT)
return; /* no bbr_init() yet if SYN retransmit -> CA_Loss */
/* Sanity alert: cwnd should never be 0, nor exceed the warn threshold. */
if (!tp->snd_cwnd || tp->snd_cwnd > bbr_cwnd_warn_val) {
char addr[INET6_ADDRSTRLEN + 10] = { 0 };
if (sk->sk_family == AF_INET)
snprintf(addr, sizeof(addr), "%pI4:%u",
&inet_sk(sk)->inet_daddr, dport);
else if (sk->sk_family == AF_INET6)
snprintf(addr, sizeof(addr), "%pI6:%u",
&sk->sk_v6_daddr, dport);
WARN_ONCE(1,
"BBR %s cwnd alert: %u "
"snd_una: %u ca: %d pacing_gain: %u cwnd_gain: %u "
"bw: %u rtt: %u min_rtt: %u "
"acked: %u tso_segs: %u "
"bw: %d %ld %d pif: %u\n",
addr, tp->snd_cwnd,
una, inet_csk(sk)->icsk_ca_state,
bbr->pacing_gain, bbr->cwnd_gain,
bbr_max_bw(sk), (tp->srtt_us >> 3), bbr->min_rtt_us,
acked, bbr_tso_segs_goal(sk),
rs->delivered, rs->interval_us, rs->is_retrans,
tcp_packets_in_flight(tp));
}
/* Fast path: bail unless a debug sink (printk or ftrace) is enabled. */
if (likely(!bbr_debug_with_printk && !bbr_debug_ftrace))
return;
/* Log only sockets marked SOCK_DBG or matching the port mask. */
if (!sock_flag(sk, SOCK_DBG) && !is_port_match)
return;
/* Without VERBOSE, log only events flagged by ctx or app-limited phases. */
if (!ctx->log && !tp->app_limited && !(bbr_flags & FLAG_DEBUG_VERBOSE))
return;
/* Skip loopback traffic unless explicitly requested. */
if (ipv4_is_loopback(inet_sk(sk)->inet_daddr) &&
!(bbr_flags & FLAG_DEBUG_LOOPBACK))
return;
snprintf(debugmsg, sizeof(debugmsg) - 1,
"BBR %pI4:%-5u %5u,%03u:%-7u %c "
"%c %2u br %2u cr %2d rtt %5ld d %2d i %5ld mrtt %d %cbw %llu "
"bw %llu lb %llu ib %llu qb %llu "
"a %u if %2u %c %c dl %u l %u al %u # %u t %u %c %c "
"lr %d er %d ea %d bwl %lld il %d ih %d c %d "
"v %d %c %u %c %s\n",
&inet_sk(sk)->inet_daddr, dport,
una / 1000, una % 1000, fack - tp->snd_una,
ca_states[inet_csk(sk)->icsk_ca_state],
bbr->debug.undo ? '@' : mode[bbr->mode],
tp->snd_cwnd,
bbr_extra_acked(sk), /* br (legacy): extra_acked */
rs->tx_in_flight, /* cr (legacy): tx_inflight */
rs->rtt_us,
rs->delivered,
rs->interval_us,
bbr->min_rtt_us,
rs->is_app_limited ? '_' : 'l',
bbr_rate_kbps(sk, ctx->sample_bw), /* lbw: latest sample bw */
bbr_rate_kbps(sk, bbr_max_bw(sk)), /* bw: max bw */
0ULL, /* lb: [obsolete] */
0ULL, /* ib: [obsolete] */
(u64)sk->sk_pacing_rate * 8 / 1000,
acked,
tcp_packets_in_flight(tp),
rs->is_ack_delayed ? 'd' : '.',
bbr->round_start ? '*' : '.',
tp->delivered, tp->lost,
tp->app_limited,
0, /* #: [obsolete] */
ctx->target_cwnd,
tp->reord_seen ? 'r' : '.', /* r: reordering seen? */
ca_states[bbr->prev_ca_state],
(rs->lost + rs->delivered) > 0 ?
(1000 * rs->lost /
(rs->lost + rs->delivered)) : 0, /* lr: loss rate x1000 */
(rs->delivered) > 0 ?
(1000 * rs->delivered_ce /
(rs->delivered)) : 0, /* er: ECN rate x1000 */
1000 * bbr->ecn_alpha >> BBR_SCALE, /* ea: ECN alpha x1000 */
bbr->bw_lo == ~0U ?
-1 : (s64)bbr_rate_kbps(sk, bbr->bw_lo), /* bwl */
bbr->inflight_lo, /* il */
bbr->inflight_hi, /* ih */
bbr->bw_probe_up_cnt, /* c */
2, /* v: version */
bbr->debug.event,
bbr->cycle_idx,
ack_phase[bbr->ack_phase],
bbr->bw_probe_samples ? "Y" : "N");
debugmsg[sizeof(debugmsg) - 1] = 0;
/* printk takes a higher precedence. */
if (bbr_debug_with_printk)
printk(KERN_DEBUG "%s", debugmsg);
/* The undo marker ('@') is one-shot: clear it after logging once. */
if (unlikely(bbr->debug.undo))
bbr->debug.undo = 0;
}
/* Convert a BBR bw and gain factor to a pacing rate in bytes per second,
 * capped by the socket's configured maximum pacing rate.
 */
static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
{
	u64 rate = bbr_rate_bytes_per_sec(sk, (u64)bw, gain,
					  bbr_pacing_margin_percent);

	if (rate > sk->sk_max_pacing_rate)
		rate = sk->sk_max_pacing_rate;
	return rate;
}
/* Initialize pacing rate to: high_gain * init_cwnd / RTT.
 * Falls back to a nominal 1ms RTT if no RTT sample has been seen yet.
 */
static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 rtt_us = USEC_PER_MSEC;	/* nominal default until first sample */
	u64 bw;

	if (tp->srtt_us) {		/* any RTT sample yet? */
		rtt_us = max(tp->srtt_us >> 3, 1U);
		bbr->has_seen_rtt = 1;
	}
	bw = (u64)tp->snd_cwnd * BW_UNIT;	/* cwnd pkts, bw fixed point */
	do_div(bw, rtt_us);			/* -> pkts/uS << BW_SCALE */
	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr->params.high_gain);
}
/* Pace using current bw estimate and a gain factor.
 * Before full bw is reached, only ever raise the pacing rate, so a
 * transient low bw sample cannot slow down STARTUP.
 */
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	unsigned long new_rate = bbr_bw_to_pacing_rate(sk, bw, gain);

	/* First RTT sample since init? Re-seed the pacing rate from it. */
	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
		bbr_init_pacing_rate_from_rtt(sk);
	if (!bbr_full_bw_reached(sk) && new_rate <= sk->sk_pacing_rate)
		return;	/* still in STARTUP: never decrease the rate */
	sk->sk_pacing_rate = new_rate;
}
/* Minimum TSO segments: 1 below the min TSO rate threshold, else 2. */
static u32 bbr_min_tso_segs(struct sock *sk)
{
	if (sk->sk_pacing_rate < (bbr_min_tso_rate >> 3))
		return 1;
	return 2;
}
/* Return the number of segments BBR would like in a TSO/GSO skb, given
 * a particular max gso size as a constraint.
 */
static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
				u32 gso_max_size)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 shift_r;
	u64 budget;

	/* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
	budget = sk->sk_pacing_rate >> sk->sk_pacing_shift;

	/* Budget a TSO/GSO burst size allowance based on min_rtt. For every
	 * K = 2^tso_rtt_shift microseconds of min_rtt, halve the burst.
	 * The min_rtt-based burst allowance is: 64 KBytes / 2^(min_rtt/K)
	 */
	if (bbr->params.tso_rtt_shift) {
		shift_r = bbr->min_rtt_us >> bbr->params.tso_rtt_shift;
		if (shift_r < BITS_PER_TYPE(u32)) /* prevent undefined behavior */
			budget += GSO_MAX_SIZE >> shift_r;
	}

	/* Keep the burst within the gso size cap, then convert to segments,
	 * never dropping below the minimum TSO segment count.
	 */
	budget = min_t(u32, budget, gso_max_size - 1 - MAX_TCP_HEADER);
	return max_t(u32, budget / mss_now, bbr_min_tso_segs(sk));
}
/* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
static u32 bbr_tso_segs(struct sock *sk, unsigned int mss_now)
{
	u32 dev_cap = sk->sk_gso_max_size;	/* honor the driver's GSO limit */

	return bbr_tso_segs_generic(sk, mss_now, dev_cap);
}
/* Like bbr_tso_segs(), using mss_cache, ignoring driver's sk_gso_max_size. */
static u32 bbr_tso_segs_goal(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
return bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
}
/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
static void bbr_save_cwnd(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
bbr->prior_cwnd = tp->snd_cwnd; /* this cwnd is good enough */
else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
}
/* Congestion-control event hook: handles restart-from-idle (TX_START while
 * app-limited) and, when precise ECE accounting is enabled, the per-packet
 * CE/no-CE transitions used for DCTCP-style ECN feedback.
 */
static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
if (event == CA_EVENT_TX_START && tp->app_limited) {
bbr->idle_restart = 1;
/* Start a fresh ACK-aggregation epoch at the restart. */
bbr->ack_epoch_mstamp = tp->tcp_mstamp;
bbr->ack_epoch_acked = 0;
/* Avoid pointless buffer overflows: pace at est. bw if we don't
 * need more speed (we're restarting from idle and app-limited).
 */
if (bbr->mode == BBR_PROBE_BW)
bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
else if (bbr->mode == BBR_PROBE_RTT)
bbr_check_probe_rtt_done(sk);
} else if ((event == CA_EVENT_ECN_IS_CE ||
event == CA_EVENT_ECN_NO_CE) &&
bbr_ecn_enable &&
bbr->params.precise_ece_ack) {
/* Round-trip ce_state through a local u32 for the helper's API. */
u32 state = bbr->ce_state;
dctcp_ece_ack_update(sk, event, &bbr->prior_rcv_nxt, &state);
bbr->ce_state = state;
/* In fast-ack mode 2, force quick ACKs so CE marks are echoed
 * promptly to the sender.
 */
if (tp->fast_ack_mode == 2 && event == CA_EVENT_ECN_IS_CE)
tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
}
}
/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
 *
 *   bdp = ceil(bw * min_rtt * gain)
 *
 * The key factor, gain, controls the amount of queue. While a small gain
 * builds a smaller queue, it becomes more vulnerable to noise in RTT
 * measurements (e.g., delayed ACKs or other ACK compression effects). This
 * noise may cause BBR to under-estimate the rate.
 */
static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u64 w;

	/* If we've never had a valid RTT sample, cap cwnd at the initial
	 * default. This should only happen when the connection is not using
	 * TCP timestamps and has retransmitted all of the SYN/SYNACK/data
	 * packets ACKed so far. In this case, an RTO can cut cwnd to 1, in
	 * which case we need to slow-start up toward something safe: initial
	 * cwnd.
	 */
	if (unlikely(bbr->min_rtt_us == ~0U))	/* no valid RTT samples yet? */
		return bbr->init_cwnd;		/* be safe: cap at initial cwnd */

	w = (u64)bw * bbr->min_rtt_us;

	/* Apply the gain, drop the BW_SCALE fixed-point shift, and round up
	 * to avoid a negative feedback loop.
	 */
	return (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
}
/* To achieve full performance in high-speed paths, we budget enough cwnd to
* fit full-sized skbs in-flight on both end hosts to fully utilize the path:
* - one skb in sending host Qdisc,
* - one skb in sending host TSO/GSO engine
* - one skb being received by receiver host LRO/GRO/delayed-ACK engine
* Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
* in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
* which allows 2 outstanding 2-packet sequences, to try to keep pipe