From f44e077c31b5d670438928b91e36feb1b7e1bd41 Mon Sep 17 00:00:00 2001
From: Shengjing Zhu <zhsj@debian.org>
Date: Wed, 7 Aug 2024 10:35:51 +0800
Subject: [PATCH] Bump gvisor.dev/gvisor to 20240729.0

Signed-off-by: Shengjing Zhu <zhsj@debian.org>
---
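Notes: this gvisor release reworks several tcpip APIs, and the tree changes
below adapt to them. The two most visible caller-side changes are that
gonet.NewUDPConn() no longer takes the *stack.Stack argument, and that
stack.PacketBufferPtr is replaced by plain *stack.PacketBuffer throughout.
A minimal, illustrative sketch of the new call shapes, assuming the wq, ep
and dispatcher names already used in pkg/services and pkg/tap:

    // Dialing a UDP endpoint: the stack argument is dropped.
    conn := gonet.NewUDPConn(&wq, ep)

    // Link endpoints now take *stack.PacketBuffer directly instead of
    // stack.PacketBufferPtr.
    func (e *LinkEndpoint) DeliverNetworkPacket(proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
    	e.dispatcher.DeliverNetworkPacket(proto, pkt)
    }
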
 go.mod                                        |    6 +-
 go.sum                                        |    8 +-
 pkg/services/dhcp/dhcp.go                     |    2 +-
 pkg/services/forwarder/udp.go                 |    2 +-
 pkg/tap/link.go                               |   21 +-
 pkg/tap/switch.go                             |   10 +-
 vendor/golang.org/x/time/rate/rate.go         |    2 +
 .../gvisor/pkg/atomicbitops/32b_32bit.go      |   82 +-
 .../gvisor/pkg/atomicbitops/32b_64bit.go      |   82 +-
 .../atomicbitops_32bit_state_autogen.go       |   36 +-
 ...atomicbitops_32bit_unsafe_state_autogen.go |   10 +-
 .../atomicbitops_64bit_state_autogen.go       |   44 +-
 .../atomicbitops_state_autogen.go             |   32 +-
 .../gvisor/pkg/atomicbitops/bool.go           |   71 -
 vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go |   36 +-
 .../gvisor/pkg/buffer/buffer_state.go         |    6 +-
 .../gvisor/pkg/buffer/buffer_state_autogen.go |   52 +-
 vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go  |    6 +-
 .../gvisor/pkg/buffer/chunk_refs.go           |    3 +-
 vendor/gvisor.dev/gvisor/pkg/buffer/view.go   |    2 +-
 .../gvisor.dev/gvisor/pkg/buffer/view_list.go |   76 +-
 vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go   |    8 +-
 .../gvisor/pkg/cpuid/cpuid_amd64.go           |   48 +-
 .../pkg/cpuid/cpuid_amd64_state_autogen.go    |   16 +-
 .../gvisor/pkg/cpuid/cpuid_arm64.go           |    2 +-
 .../pkg/cpuid/cpuid_arm64_state_autogen.go    |    6 +-
 .../gvisor/pkg/cpuid/cpuid_state_autogen.go   |    6 +-
 .../gvisor/pkg/cpuid/features_amd64.go        |   76 +-
 .../gvisor/pkg/cpuid/native_amd64.go          |    3 +
 .../gvisor/pkg/cpuid/native_amd64.s           |   13 +
 .../gvisor/pkg/cpuid/static_amd64.go          |    4 +-
 .../goid/{goid_amd64.s => goid_122_amd64.s}   |    2 +
 .../goid/{goid_arm64.s => goid_122_arm64.s}   |    2 +
 .../goid_123_amd64.s}                         |   17 +-
 .../gvisor/pkg/goid/goid_123_arm64.s          |   26 +
 vendor/gvisor.dev/gvisor/pkg/log/json.go      |   13 +-
 vendor/gvisor.dev/gvisor/pkg/log/json_k8s.go  |   13 +-
 vendor/gvisor.dev/gvisor/pkg/log/log.go       |    4 +-
 vendor/gvisor.dev/gvisor/pkg/rand/rand.go     |    2 -
 .../gvisor.dev/gvisor/pkg/rand/rand_linux.go  |    2 -
 vendor/gvisor.dev/gvisor/pkg/rand/rng.go      |  131 ++
 .../gvisor/pkg/sleep/sleep_unsafe.go          |    7 +-
 .../pkg/sleep/sleep_unsafe_state_autogen.go   |   14 +-
 .../gvisor.dev/gvisor/pkg/state/addr_set.go   |  653 ++++--
 vendor/gvisor.dev/gvisor/pkg/state/decode.go  |   13 +-
 vendor/gvisor.dev/gvisor/pkg/state/encode.go  |   13 +-
 vendor/gvisor.dev/gvisor/pkg/state/state.go   |    7 +-
 vendor/gvisor.dev/gvisor/pkg/state/types.go   |    2 +-
 .../gvisor.dev/gvisor/pkg/state/wire/wire.go  |  170 +-
 .../gvisor.dev/gvisor/pkg/sync/gate_unsafe.go |    4 +-
 .../gvisor/pkg/sync/runtime_constants.go      |    7 -
 .../gvisor/pkg/sync/runtime_exectracer2.go    |   21 +
 .../gvisor/pkg/sync/runtime_spinning_other.s  |    2 +-
 .../gvisor/pkg/sync/runtime_unsafe.go         |    9 +-
 .../gvisor/pkg/tcpip/adapters/gonet/gonet.go  |   15 +-
 vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go  |   17 +-
 .../gvisor/pkg/tcpip/errors_linux.go          |   74 +
 .../gvisor/pkg/tcpip/header/checksum.go       |    3 +
 .../gvisor.dev/gvisor/pkg/tcpip/header/eth.go |    5 +-
 .../pkg/tcpip/header/header_state_autogen.go  |   14 +-
 .../gvisor/pkg/tcpip/header/icmpv6.go         |    2 +-
 .../gvisor/pkg/tcpip/header/igmpv3.go         |    2 +-
 .../gvisor/pkg/tcpip/header/ipv4.go           |   21 +-
 .../gvisor/pkg/tcpip/header/ipv6.go           |   12 +
 .../tcpip/header/ipv6_extension_headers.go    |   12 +-
 .../gvisor/pkg/tcpip/header/mldv2.go          |    2 +-
 .../pkg/tcpip/header/ndp_neighbor_advert.go   |    2 +-
 .../pkg/tcpip/header/ndp_neighbor_solicit.go  |    2 +-
 .../gvisor/pkg/tcpip/header/ndp_options.go    |   20 +-
 .../pkg/tcpip/header/ndp_router_advert.go     |    2 +-
 .../pkg/tcpip/header/ndp_router_solicit.go    |    2 +-
 .../gvisor/pkg/tcpip/header/parse/parse.go    |   14 +-
 .../gvisor.dev/gvisor/pkg/tcpip/header/tcp.go |   11 +-
 .../tcpip/internal/tcp/tcp_state_autogen.go   |    6 +-
 .../gvisor/pkg/tcpip/link/nested/nested.go    |   34 +-
 .../tcpip/link/nested/nested_state_autogen.go |   41 +
 .../gvisor/pkg/tcpip/link/sniffer/pcap.go     |    2 +-
 .../gvisor/pkg/tcpip/link/sniffer/sniffer.go  |   41 +-
 .../link/sniffer/sniffer_state_autogen.go     |   44 +
 .../gvisor/pkg/tcpip/network/arp/arp.go       |   16 +-
 .../tcpip/network/arp/arp_state_autogen.go    |  216 ++
 .../gvisor/pkg/tcpip/network/arp/stats.go     |    4 +
 .../internal/fragmentation/fragmentation.go   |   20 +-
 .../fragmentation_state_autogen.go            |  186 +-
 .../internal/fragmentation/reassembler.go     |   12 +-
 .../ip/duplicate_address_detection.go         |   12 +-
 .../pkg/tcpip/network/internal/ip/errors.go   |    4 +-
 .../internal/ip/generic_multicast_protocol.go |   25 +-
 .../network/internal/ip/ip_state_autogen.go   |  386 +++-
 .../pkg/tcpip/network/internal/ip/stats.go    |    6 +-
 .../multicast/multicast_state_autogen.go      |  134 ++
 .../network/internal/multicast/route_table.go |   22 +-
 .../gvisor/pkg/tcpip/network/ipv4/icmp.go     |   13 +-
 .../gvisor/pkg/tcpip/network/ipv4/igmp.go     |   14 +-
 .../gvisor/pkg/tcpip/network/ipv4/ipv4.go     |  132 +-
 .../tcpip/network/ipv4/ipv4_state_autogen.go  |  578 ++++-
 .../gvisor/pkg/tcpip/network/ipv4/stats.go    |   11 +
 .../gvisor/pkg/tcpip/ports/flags.go           |    4 +-
 .../gvisor/pkg/tcpip/ports/ports.go           |   53 +-
 .../pkg/tcpip/ports/ports_state_autogen.go    |  125 +-
 .../gro_packet_list.go => route_list.go}      |   86 +-
 .../gvisor.dev/gvisor/pkg/tcpip/socketops.go  |   14 +
 .../pkg/tcpip/stack/address_state_mutex.go    |    2 +-
 .../pkg/tcpip/stack/address_state_refs.go     |    3 +-
 .../tcpip/stack/addressable_endpoint_state.go |   50 +-
 .../stack/addressable_endpoint_state_mutex.go |    2 +-
 .../gvisor/pkg/tcpip/stack/bridge.go          |  229 ++
 .../gvisor/pkg/tcpip/stack/bridge_mutex.go    |   96 +
 .../gvisor/pkg/tcpip/stack/bucket_mutex.go    |    2 +-
 .../tcpip/stack/cleanup_endpoints_mutex.go    |    2 +-
 .../gvisor/pkg/tcpip/stack/conn_mutex.go      |    2 +-
 .../pkg/tcpip/stack/conn_track_mutex.go       |    2 +-
 .../gvisor/pkg/tcpip/stack/conntrack.go       |   71 +-
 .../pkg/tcpip/stack/endpoints_by_nic_mutex.go |    2 +-
 .../gvisor.dev/gvisor/pkg/tcpip/stack/gro.go  |  730 -------
 .../gvisor/pkg/tcpip/stack/icmp_rate_limit.go |    5 +-
 .../gvisor/pkg/tcpip/stack/iptables.go        |   99 +-
 .../gvisor/pkg/tcpip/stack/iptables_mutex.go  |    2 +-
 .../pkg/tcpip/stack/iptables_targets.go       |   90 +-
 .../gvisor/pkg/tcpip/stack/iptables_types.go  |   36 +-
 .../tcpip/stack/multi_port_endpoint_mutex.go  |    2 +-
 .../gvisor/pkg/tcpip/stack/neighbor_cache.go  |   39 +-
 .../pkg/tcpip/stack/neighbor_cache_mutex.go   |    2 +-
 .../pkg/tcpip/stack/neighbor_entry_mutex.go   |    2 +-
 .../gvisor.dev/gvisor/pkg/tcpip/stack/nic.go  |   97 +-
 .../gvisor/pkg/tcpip/stack/nic_mutex.go       |    2 +-
 .../gvisor/pkg/tcpip/stack/nic_stats.go       |    4 +
 .../gvisor.dev/gvisor/pkg/tcpip/stack/nud.go  |   39 +-
 .../gvisor/pkg/tcpip/stack/packet_buffer.go   |   86 +-
 .../pkg/tcpip/stack/packet_buffer_list.go     |   12 +
 .../pkg/tcpip/stack/packet_buffer_refs.go     |    3 +-
 .../pkg/tcpip/stack/packet_buffer_unsafe.go   |    4 +-
 .../tcpip/stack/packet_endpoint_list_mutex.go |    2 +-
 .../pkg/tcpip/stack/packet_eps_mutex.go       |    2 +-
 .../packets_pending_link_resolution_mutex.go  |    2 +-
 .../gvisor/pkg/tcpip/stack/pending_packets.go |   41 +-
 .../gvisor/pkg/tcpip/stack/registration.go    |   88 +-
 .../gvisor/pkg/tcpip/stack/route.go           |   49 +-
 .../gvisor/pkg/tcpip/stack/route_mutex.go     |    2 +-
 .../pkg/tcpip/stack/route_stack_mutex.go      |    2 +-
 .../gvisor/pkg/tcpip/stack/stack.go           |  487 +++--
 .../gvisor/pkg/tcpip/stack/stack_mutex.go     |    2 +-
 .../pkg/tcpip/stack/stack_state_autogen.go    | 1857 +++++++++++++---
 .../pkg/tcpip/stack/state_conn_mutex.go       |    2 +-
 .../gvisor.dev/gvisor/pkg/tcpip/stack/tcp.go  |   34 +
 .../pkg/tcpip/stack/transport_demuxer.go      |   26 +-
 .../tcpip/stack/transport_endpoints_mutex.go  |    2 +-
 .../gvisor.dev/gvisor/pkg/tcpip/stdclock.go   |    1 +
 .../gvisor/pkg/tcpip/stdclock_state.go        |    7 +-
 vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go   |  182 +-
 .../pkg/tcpip/tcpip_linux_state_autogen.go    |    6 +
 .../gvisor/pkg/tcpip/tcpip_state.go           |    3 +-
 .../gvisor/pkg/tcpip/tcpip_state_autogen.go   | 1946 +++++++++++++++--
 vendor/gvisor.dev/gvisor/pkg/tcpip/timer.go   |    9 +-
 .../pkg/tcpip/transport/icmp/endpoint.go      |   19 +-
 .../tcpip/transport/icmp/endpoint_state.go    |   17 +-
 .../transport/icmp/icmp_state_autogen.go      |   88 +-
 .../pkg/tcpip/transport/icmp/protocol.go      |    6 +-
 .../transport/internal/network/endpoint.go    |   49 +-
 .../internal/network/network_state_autogen.go |   10 +-
 .../tcpip/transport/internal/noop/endpoint.go |    2 +-
 .../internal/noop/noop_state_autogen.go       |    6 +-
 .../pkg/tcpip/transport/packet/endpoint.go    |   15 +-
 .../tcpip/transport/packet/endpoint_state.go  |   15 +-
 .../transport/packet/packet_state_autogen.go  |   20 +-
 .../pkg/tcpip/transport/raw/endpoint.go       |    6 +-
 .../pkg/tcpip/transport/raw/endpoint_state.go |   17 +-
 .../pkg/tcpip/transport/raw/protocol.go       |    6 +-
 .../tcpip/transport/raw/raw_state_autogen.go  |   64 +-
 .../gvisor/pkg/tcpip/transport/tcp/accept.go  |   90 +-
 .../gvisor/pkg/tcpip/transport/tcp/connect.go |  183 +-
 .../gvisor/pkg/tcpip/transport/tcp/cubic.go   |  104 +-
 .../pkg/tcpip/transport/tcp/dispatcher.go     |   61 +-
 .../pkg/tcpip/transport/tcp/endpoint.go       |  343 +--
 .../pkg/tcpip/transport/tcp/endpoint_state.go |   46 +-
 .../pkg/tcpip/transport/tcp/forwarder.go      |    2 +-
 .../pkg/tcpip/transport/tcp/protocol.go       |   67 +-
 .../gvisor/pkg/tcpip/transport/tcp/rack.go    |   18 +-
 .../gvisor/pkg/tcpip/transport/tcp/rcv.go     |    6 +-
 .../gvisor/pkg/tcpip/transport/tcp/reno.go    |    6 +-
 .../gvisor/pkg/tcpip/transport/tcp/segment.go |   28 +-
 .../pkg/tcpip/transport/tcp/segment_queue.go  |    2 +-
 .../pkg/tcpip/transport/tcp/segment_state.go  |    6 +-
 .../gvisor/pkg/tcpip/transport/tcp/snd.go     |  145 +-
 .../tcpip/transport/tcp/tcp_endpoint_list.go  |   32 +-
 .../tcpip/transport/tcp/tcp_segment_refs.go   |    3 +-
 .../tcpip/transport/tcp/tcp_state_autogen.go  |  514 +++--
 .../gvisor/pkg/tcpip/transport/tcp/timer.go   |   36 +-
 .../transport/tcpconntrack/tcp_conntrack.go   |    9 +-
 .../tcpconntrack_state_autogen.go             |   88 +
 .../pkg/tcpip/transport/udp/endpoint.go       |   43 +-
 .../pkg/tcpip/transport/udp/endpoint_state.go |   17 +-
 .../pkg/tcpip/transport/udp/forwarder.go      |    4 +-
 .../pkg/tcpip/transport/udp/protocol.go       |    5 +-
 .../tcpip/transport/udp/udp_state_autogen.go  |  113 +-
 vendor/gvisor.dev/gvisor/pkg/waiter/waiter.go |    4 +-
 .../gvisor/pkg/waiter/waiter_state_autogen.go |   18 +-
 vendor/modules.txt                            |    8 +-
 198 files changed, 9827 insertions(+), 3361 deletions(-)
 delete mode 100644 vendor/gvisor.dev/gvisor/pkg/atomicbitops/bool.go
 rename vendor/gvisor.dev/gvisor/pkg/goid/{goid_amd64.s => goid_122_amd64.s} (97%)
 rename vendor/gvisor.dev/gvisor/pkg/goid/{goid_arm64.s => goid_122_arm64.s} (97%)
 rename vendor/gvisor.dev/gvisor/pkg/{tcpip/stack/stack_global_state.go => goid/goid_123_amd64.s} (67%)
 create mode 100644 vendor/gvisor.dev/gvisor/pkg/goid/goid_123_arm64.s
 create mode 100644 vendor/gvisor.dev/gvisor/pkg/rand/rng.go
 create mode 100644 vendor/gvisor.dev/gvisor/pkg/sync/runtime_exectracer2.go
 create mode 100644 vendor/gvisor.dev/gvisor/pkg/tcpip/errors_linux.go
 rename vendor/gvisor.dev/gvisor/pkg/tcpip/{stack/gro_packet_list.go => route_list.go} (61%)
 create mode 100644 vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bridge.go
 create mode 100644 vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bridge_mutex.go
 delete mode 100644 vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go
 create mode 100644 vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_linux_state_autogen.go

diff --git a/go.mod b/go.mod
index 42b3851a..5dfdb294 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/containers/gvisor-tap-vsock
 
-go 1.21
+go 1.22.0
 
 require (
 	github.com/Microsoft/go-winio v0.6.2
@@ -26,7 +26,7 @@ require (
 	golang.org/x/crypto v0.25.0
 	golang.org/x/sync v0.7.0
 	golang.org/x/sys v0.22.0
-	gvisor.dev/gvisor v0.0.0-20231023213702-2691a8f9b1cf
+	gvisor.dev/gvisor v0.0.0-20240726212243-a2b0498dbe7d
 )
 
 require (
@@ -43,7 +43,7 @@ require (
 	golang.org/x/mod v0.18.0 // indirect
 	golang.org/x/net v0.26.0 // indirect
 	golang.org/x/text v0.16.0 // indirect
-	golang.org/x/time v0.3.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
 	golang.org/x/tools v0.22.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 02b63195..ece9fe7c 100644
--- a/go.sum
+++ b/go.sum
@@ -138,8 +138,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
 golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
-golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -169,5 +169,5 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gvisor.dev/gvisor v0.0.0-20231023213702-2691a8f9b1cf h1:0A28IFBR6VcMacM0m6Rn5/nr8pk8xa2TyIkjSaFAOPc=
-gvisor.dev/gvisor v0.0.0-20231023213702-2691a8f9b1cf/go.mod h1:8hmigyCdYtw5xJGfQDJzSH5Ju8XEIDBnpyi8+O6GRt8=
+gvisor.dev/gvisor v0.0.0-20240726212243-a2b0498dbe7d h1:nF+dSOz0u0DLrhnOmGp3ocPAylsgpim29DGIt/oxNR4=
+gvisor.dev/gvisor v0.0.0-20240726212243-a2b0498dbe7d/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU=
diff --git a/pkg/services/dhcp/dhcp.go b/pkg/services/dhcp/dhcp.go
index cfd5370e..f0218fbc 100644
--- a/pkg/services/dhcp/dhcp.go
+++ b/pkg/services/dhcp/dhcp.go
@@ -89,7 +89,7 @@ func dial(s *stack.Stack, nic int) (*gonet.UDPConn, error) {
 		return nil, errors.New(err.String())
 	}
 
-	return gonet.NewUDPConn(s, &wq, ep), nil
+	return gonet.NewUDPConn(&wq, ep), nil
 }
 
 type Server struct {
diff --git a/pkg/services/forwarder/udp.go b/pkg/services/forwarder/udp.go
index 3226b14c..4ab632e3 100644
--- a/pkg/services/forwarder/udp.go
+++ b/pkg/services/forwarder/udp.go
@@ -35,7 +35,7 @@ func UDP(s *stack.Stack, nat map[tcpip.Address]tcpip.Address, natLock *sync.Mute
 			return
 		}
 
-		p, _ := NewUDPProxy(&autoStoppingListener{underlying: gonet.NewUDPConn(s, &wq, ep)}, func() (net.Conn, error) {
+		p, _ := NewUDPProxy(&autoStoppingListener{underlying: gonet.NewUDPConn(&wq, ep)}, func() (net.Conn, error) {
 			return net.Dial("udp", fmt.Sprintf("%s:%d", localAddress, r.ID().LocalPort))
 		})
 		go p.Run()
diff --git a/pkg/tap/link.go b/pkg/tap/link.go
index b85ce43d..4ab0859b 100644
--- a/pkg/tap/link.go
+++ b/pkg/tap/link.go
@@ -56,14 +56,14 @@ func (e *LinkEndpoint) IsAttached() bool {
 	return e.dispatcher != nil
 }
 
-func (e *LinkEndpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
+func (e *LinkEndpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
 	e.dispatcher.DeliverNetworkPacket(protocol, pkt)
 }
 
-func (e *LinkEndpoint) AddHeader(_ stack.PacketBufferPtr) {
+func (e *LinkEndpoint) AddHeader(_ *stack.PacketBuffer) {
 }
 
-func (e *LinkEndpoint) ParseHeader(stack.PacketBufferPtr) bool { return true }
+func (e *LinkEndpoint) ParseHeader(*stack.PacketBuffer) bool { return true }
 
 func (e *LinkEndpoint) Capabilities() stack.LinkEndpointCapabilities {
 	return stack.CapabilityResolutionRequired | stack.CapabilityRXChecksumOffload
@@ -73,6 +73,10 @@ func (e *LinkEndpoint) LinkAddress() tcpip.LinkAddress {
 	return e.mac
 }
 
+func (e *LinkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) {
+	e.mac = addr
+}
+
 func (e *LinkEndpoint) MaxHeaderLength() uint16 {
 	return uint16(header.EthernetMinimumSize)
 }
@@ -81,9 +85,14 @@ func (e *LinkEndpoint) MTU() uint32 {
 	return uint32(e.mtu)
 }
 
-func (e *LinkEndpoint) Wait() {
+func (e *LinkEndpoint) SetMTU(mtu uint32) {
+	e.mtu = int(mtu)
 }
 
+func (e *LinkEndpoint) Wait()                          {}
+func (e *LinkEndpoint) Close()                         {}
+func (e *LinkEndpoint) SetOnCloseAction(action func()) {}
+
 func (e *LinkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {
 	n := 0
 	for _, p := range pkts.AsSlice() {
@@ -95,7 +104,7 @@ func (e *LinkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Err
 	return n, nil
 }
 
-func (e *LinkEndpoint) writePacket(r stack.RouteInfo, protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) tcpip.Error {
+func (e *LinkEndpoint) writePacket(r stack.RouteInfo, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) tcpip.Error {
 	// Preserve the src address if it's set in the route.
 	srcAddr := e.LinkAddress()
 	if r.LocalLinkAddress != "" {
@@ -128,7 +137,7 @@ func (e *LinkEndpoint) writePacket(r stack.RouteInfo, protocol tcpip.NetworkProt
 	return nil
 }
 
-func (e *LinkEndpoint) WriteRawPacket(_ stack.PacketBufferPtr) tcpip.Error {
+func (e *LinkEndpoint) WriteRawPacket(_ *stack.PacketBuffer) tcpip.Error {
 	return &tcpip.ErrNotSupported{}
 }
 
diff --git a/pkg/tap/switch.go b/pkg/tap/switch.go
index 5f1d16f3..189f0d65 100644
--- a/pkg/tap/switch.go
+++ b/pkg/tap/switch.go
@@ -21,13 +21,13 @@ import (
 )
 
 type VirtualDevice interface {
-	DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr)
+	DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer)
 	LinkAddress() tcpip.LinkAddress
 	IP() string
 }
 
 type NetworkSwitch interface {
-	DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr)
+	DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer)
 }
 
 type Switch struct {
@@ -72,7 +72,7 @@ func (e *Switch) Connect(ep VirtualDevice) {
 	e.gateway = ep
 }
 
-func (e *Switch) DeliverNetworkPacket(_ tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
+func (e *Switch) DeliverNetworkPacket(_ tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
 	if err := e.tx(pkt); err != nil {
 		log.Error(err)
 	}
@@ -111,11 +111,11 @@ func (e *Switch) connect(conn protocolConn) (int, bool) {
 	return id, false
 }
 
-func (e *Switch) tx(pkt stack.PacketBufferPtr) error {
+func (e *Switch) tx(pkt *stack.PacketBuffer) error {
 	return e.txPkt(pkt)
 }
 
-func (e *Switch) txPkt(pkt stack.PacketBufferPtr) error {
+func (e *Switch) txPkt(pkt *stack.PacketBuffer) error {
 	e.writeLock.Lock()
 	defer e.writeLock.Unlock()
 
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index f0e0cf3c..8f6c7f49 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit {
 // or its associated context.Context is canceled.
 //
 // The methods AllowN, ReserveN, and WaitN consume n tokens.
+//
+// Limiter is safe for simultaneous use by multiple goroutines.
 type Limiter struct {
 	mu     sync.Mutex
 	limit  Limit
diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go
index c44610db..d2ab60ec 100644
--- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go
+++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go
@@ -77,9 +77,6 @@ func (i *Int32) Store(v int32) {
 //
 // It may be helpful to document why a racy operation is permitted.
 //
-// Don't add fields to this struct. It is important that it remain the same
-// size as its builtin analogue.
-//
 //go:nosplit
 func (i *Int32) RacyStore(v int32) {
 	i.value = v
@@ -124,6 +121,9 @@ func (i *Int32) ptr() *int32 {
 
 // Uint32 is an atomic uint32.
 //
+// Don't add fields to this struct. It is important that it remain the same
+// size as its builtin analogue.
+//
 // See aligned_unsafe.go in this directory for justification.
 //
 // +stateify savable
@@ -210,4 +210,80 @@ func (u *Uint32) ptr() *uint32 {
 	return &u.value
 }
 
+// Bool is an atomic Boolean.
+//
+// It is implemented by a Uint32, with value 0 indicating false, and 1
+// indicating true.
+//
+// +stateify savable
+type Bool struct {
+	Uint32
+}
+
+// b32 returns a uint32 0 or 1 representing b.
+func b32(b bool) uint32 {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// FromBool returns a Bool initialized to value val.
+//
+//go:nosplit
+func FromBool(val bool) Bool {
+	return Bool{
+		Uint32: FromUint32(b32(val)),
+	}
+}
+
+// Load is analogous to atomic.LoadBool, if such a thing existed.
+//
+//go:nosplit
+func (b *Bool) Load() bool {
+	return b.Uint32.Load() != 0
+}
+
+// RacyLoad is analogous to reading an atomic value without using
+// synchronization.
+//
+// It may be helpful to document why a racy operation is permitted.
+//
+//go:nosplit
+func (b *Bool) RacyLoad() bool {
+	return b.Uint32.RacyLoad() != 0
+}
+
+// Store is analogous to atomic.StoreBool, if such a thing existed.
+//
+//go:nosplit
+func (b *Bool) Store(val bool) {
+	b.Uint32.Store(b32(val))
+}
+
+// RacyStore is analogous to setting an atomic value without using
+// synchronization.
+//
+// It may be helpful to document why a racy operation is permitted.
+//
+//go:nosplit
+func (b *Bool) RacyStore(val bool) {
+	b.Uint32.RacyStore(b32(val))
+}
+
+// Swap is analogous to atomic.SwapBool, if such a thing existed.
+//
+//go:nosplit
+func (b *Bool) Swap(val bool) bool {
+	return b.Uint32.Swap(b32(val)) != 0
+}
+
+// CompareAndSwap is analogous to atomic.CompareAndSwapBool, if such a thing
+// existed.
+//
+//go:nosplit
+func (b *Bool) CompareAndSwap(oldVal, newVal bool) bool {
+	return b.Uint32.CompareAndSwap(b32(oldVal), b32(newVal))
+}
+
 // LINT.ThenChange(32b_64bit.go)
diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go
index 18aa9630..af926eb4 100644
--- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go
+++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go
@@ -77,9 +77,6 @@ func (i *Int32) Store(v int32) {
 //
 // It may be helpful to document why a racy operation is permitted.
 //
-// Don't add fields to this struct. It is important that it remain the same
-// size as its builtin analogue.
-//
 //go:nosplit
 func (i *Int32) RacyStore(v int32) {
 	i.value = v
@@ -124,6 +121,9 @@ func (i *Int32) ptr() *int32 {
 
 // Uint32 is an atomic uint32.
 //
+// Don't add fields to this struct. It is important that it remain the same
+// size as its builtin analogue.
+//
 // See aligned_unsafe.go in this directory for justification.
 //
 // +stateify savable
@@ -210,4 +210,80 @@ func (u *Uint32) ptr() *uint32 {
 	return &u.value
 }
 
+// Bool is an atomic Boolean.
+//
+// It is implemented by a Uint32, with value 0 indicating false, and 1
+// indicating true.
+//
+// +stateify savable
+type Bool struct {
+	Uint32
+}
+
+// b32 returns a uint32 0 or 1 representing b.
+func b32(b bool) uint32 {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// FromBool returns a Bool initialized to value val.
+//
+//go:nosplit
+func FromBool(val bool) Bool {
+	return Bool{
+		Uint32: FromUint32(b32(val)),
+	}
+}
+
+// Load is analogous to atomic.LoadBool, if such a thing existed.
+//
+//go:nosplit
+func (b *Bool) Load() bool {
+	return b.Uint32.Load() != 0
+}
+
+// RacyLoad is analogous to reading an atomic value without using
+// synchronization.
+//
+// It may be helpful to document why a racy operation is permitted.
+//
+//go:nosplit
+func (b *Bool) RacyLoad() bool {
+	return b.Uint32.RacyLoad() != 0
+}
+
+// Store is analogous to atomic.StoreBool, if such a thing existed.
+//
+//go:nosplit
+func (b *Bool) Store(val bool) {
+	b.Uint32.Store(b32(val))
+}
+
+// RacyStore is analogous to setting an atomic value without using
+// synchronization.
+//
+// It may be helpful to document why a racy operation is permitted.
+//
+//go:nosplit
+func (b *Bool) RacyStore(val bool) {
+	b.Uint32.RacyStore(b32(val))
+}
+
+// Swap is analogous to atomic.SwapBool, if such a thing existed.
+//
+//go:nosplit
+func (b *Bool) Swap(val bool) bool {
+	return b.Uint32.Swap(b32(val)) != 0
+}
+
+// CompareAndSwap is analogous to atomic.CompareAndSwapBool, if such a thing
+// existed.
+//
+//go:nosplit
+func (b *Bool) CompareAndSwap(oldVal, newVal bool) bool {
+	return b.Uint32.CompareAndSwap(b32(oldVal), b32(newVal))
+}
+
 // LINT.ThenChange(32b_32bit.go)
diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go
index 4d3cad36..78e501aa 100644
--- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go
@@ -6,6 +6,8 @@
 package atomicbitops
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -27,10 +29,10 @@ func (i *Int32) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &i.value)
 }
 
-func (i *Int32) afterLoad() {}
+func (i *Int32) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *Int32) StateLoad(stateSourceObject state.Source) {
+func (i *Int32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.value)
 }
 
@@ -52,14 +54,40 @@ func (u *Uint32) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &u.value)
 }
 
-func (u *Uint32) afterLoad() {}
+func (u *Uint32) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (u *Uint32) StateLoad(stateSourceObject state.Source) {
+func (u *Uint32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &u.value)
 }
 
+func (b *Bool) StateTypeName() string {
+	return "pkg/atomicbitops.Bool"
+}
+
+func (b *Bool) StateFields() []string {
+	return []string{
+		"Uint32",
+	}
+}
+
+func (b *Bool) beforeSave() {}
+
+// +checklocksignore
+func (b *Bool) StateSave(stateSinkObject state.Sink) {
+	b.beforeSave()
+	stateSinkObject.Save(0, &b.Uint32)
+}
+
+func (b *Bool) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (b *Bool) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &b.Uint32)
+}
+
 func init() {
 	state.Register((*Int32)(nil))
 	state.Register((*Uint32)(nil))
+	state.Register((*Bool)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go
index b58524da..606a6d02 100644
--- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go
@@ -6,6 +6,8 @@
 package atomicbitops
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -29,10 +31,10 @@ func (i *Int64) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &i.value32)
 }
 
-func (i *Int64) afterLoad() {}
+func (i *Int64) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *Int64) StateLoad(stateSourceObject state.Source) {
+func (i *Int64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.value)
 	stateSourceObject.Load(1, &i.value32)
 }
@@ -57,10 +59,10 @@ func (u *Uint64) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &u.value32)
 }
 
-func (u *Uint64) afterLoad() {}
+func (u *Uint64) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (u *Uint64) StateLoad(stateSourceObject state.Source) {
+func (u *Uint64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &u.value)
 	stateSourceObject.Load(1, &u.value32)
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go
index 894dde88..8e6cd37c 100644
--- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go
@@ -6,6 +6,8 @@
 package atomicbitops
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -27,10 +29,10 @@ func (i *Int32) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &i.value)
 }
 
-func (i *Int32) afterLoad() {}
+func (i *Int32) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *Int32) StateLoad(stateSourceObject state.Source) {
+func (i *Int32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.value)
 }
 
@@ -52,13 +54,38 @@ func (u *Uint32) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &u.value)
 }
 
-func (u *Uint32) afterLoad() {}
+func (u *Uint32) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (u *Uint32) StateLoad(stateSourceObject state.Source) {
+func (u *Uint32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &u.value)
 }
 
+func (b *Bool) StateTypeName() string {
+	return "pkg/atomicbitops.Bool"
+}
+
+func (b *Bool) StateFields() []string {
+	return []string{
+		"Uint32",
+	}
+}
+
+func (b *Bool) beforeSave() {}
+
+// +checklocksignore
+func (b *Bool) StateSave(stateSinkObject state.Sink) {
+	b.beforeSave()
+	stateSinkObject.Save(0, &b.Uint32)
+}
+
+func (b *Bool) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (b *Bool) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &b.Uint32)
+}
+
 func (i *Int64) StateTypeName() string {
 	return "pkg/atomicbitops.Int64"
 }
@@ -77,10 +104,10 @@ func (i *Int64) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &i.value)
 }
 
-func (i *Int64) afterLoad() {}
+func (i *Int64) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *Int64) StateLoad(stateSourceObject state.Source) {
+func (i *Int64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.value)
 }
 
@@ -102,16 +129,17 @@ func (u *Uint64) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &u.value)
 }
 
-func (u *Uint64) afterLoad() {}
+func (u *Uint64) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (u *Uint64) StateLoad(stateSourceObject state.Source) {
+func (u *Uint64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &u.value)
 }
 
 func init() {
 	state.Register((*Int32)(nil))
 	state.Register((*Uint32)(nil))
+	state.Register((*Bool)(nil))
 	state.Register((*Int64)(nil))
 	state.Register((*Uint64)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go
index bc8fc35d..ca763da6 100644
--- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go
@@ -8,6 +8,8 @@
 package atomicbitops
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -29,39 +31,13 @@ func (f *Float64) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &f.bits)
 }
 
-func (f *Float64) afterLoad() {}
+func (f *Float64) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (f *Float64) StateLoad(stateSourceObject state.Source) {
+func (f *Float64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &f.bits)
 }
 
-func (b *Bool) StateTypeName() string {
-	return "pkg/atomicbitops.Bool"
-}
-
-func (b *Bool) StateFields() []string {
-	return []string{
-		"Uint32",
-	}
-}
-
-func (b *Bool) beforeSave() {}
-
-// +checklocksignore
-func (b *Bool) StateSave(stateSinkObject state.Sink) {
-	b.beforeSave()
-	stateSinkObject.Save(0, &b.Uint32)
-}
-
-func (b *Bool) afterLoad() {}
-
-// +checklocksignore
-func (b *Bool) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &b.Uint32)
-}
-
 func init() {
 	state.Register((*Float64)(nil))
-	state.Register((*Bool)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/bool.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/bool.go
deleted file mode 100644
index 60e646e8..00000000
--- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/bool.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2022 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package atomicbitops
-
-import "sync/atomic"
-
-// Bool is an atomic Boolean.
-//
-// It is implemented by a Uint32, with value 0 indicating false, and 1
-// indicating true.
-//
-// +stateify savable
-type Bool struct {
-	Uint32
-}
-
-// FromBool returns an Bool initialized to value val.
-//
-//go:nosplit
-func FromBool(val bool) Bool {
-	var u uint32
-	if val {
-		u = 1
-	}
-	return Bool{
-		Uint32{
-			value: u,
-		},
-	}
-}
-
-// Load is analogous to atomic.LoadBool, if such a thing existed.
-//
-//go:nosplit
-func (b *Bool) Load() bool {
-	return atomic.LoadUint32(&b.value) == 1
-}
-
-// Store is analogous to atomic.StoreBool, if such a thing existed.
-//
-//go:nosplit
-func (b *Bool) Store(val bool) {
-	var u uint32
-	if val {
-		u = 1
-	}
-	atomic.StoreUint32(&b.value, u)
-}
-
-// Swap is analogous to atomic.SwapBool, if such a thing existed.
-//
-//go:nosplit
-func (b *Bool) Swap(val bool) bool {
-	var u uint32
-	if val {
-		u = 1
-	}
-	return atomic.SwapUint32(&b.value, u) == 1
-}
diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go
index cc663ae7..3e6bc6dd 100644
--- a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go
+++ b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go
@@ -28,7 +28,7 @@ import (
 //
 // +stateify savable
 type Buffer struct {
-	data viewList `state:".([]byte)"`
+	data ViewList `state:".([]byte)"`
 	size int64
 }
 
@@ -189,12 +189,9 @@ func (b *Buffer) GrowTo(length int64, zero bool) {
 			sz = int(length - b.size)
 		}
 
-		// Zero the written section; note that this pattern is
-		// specifically recognized and optimized by the compiler.
+		// Zero the written section.
 		if zero {
-			for i := v.write; i < v.write+sz; i++ {
-				v.chunk.data[i] = 0
-			}
+			clear(v.chunk.data[v.write : v.write+sz])
 		}
 
 		// Advance the index.
@@ -401,6 +398,12 @@ func (b *Buffer) Size() int64 {
 	return b.size
 }
 
+// AsViewList returns the ViewList backing b. Users may not save or modify the
+// ViewList returned.
+func (b *Buffer) AsViewList() ViewList {
+	return b.data
+}
+
 // Clone creates a copy-on-write clone of b. The underlying chunks are shared
 // until they are written to.
 func (b *Buffer) Clone() Buffer {
@@ -479,7 +482,7 @@ func (b *Buffer) Checksum(offset int) uint16 {
 // operation completes.
 func (b *Buffer) Merge(other *Buffer) {
 	b.data.PushBackList(&other.data)
-	other.data = viewList{}
+	other.data = ViewList{}
 
 	// Adjust sizes.
 	b.size += other.size
@@ -489,6 +492,18 @@ func (b *Buffer) Merge(other *Buffer) {
 // WriteFromReader writes to the buffer from an io.Reader. A maximum read size
 // of MaxChunkSize is enforced to prevent allocating views from the heap.
 func (b *Buffer) WriteFromReader(r io.Reader, count int64) (int64, error) {
+	return b.WriteFromReaderAndLimitedReader(r, count, nil)
+}
+
+// WriteFromReaderAndLimitedReader is the same as WriteFromReader, but
+// optimized to avoid allocations if a LimitedReader is passed in.
+//
+// This function clobbers the values of lr.
+func (b *Buffer) WriteFromReaderAndLimitedReader(r io.Reader, count int64, lr *io.LimitedReader) (int64, error) {
+	if lr == nil {
+		lr = &io.LimitedReader{}
+	}
+
 	var done int64
 	for done < count {
 		vsize := count - done
@@ -496,8 +511,9 @@ func (b *Buffer) WriteFromReader(r io.Reader, count int64) (int64, error) {
 			vsize = MaxChunkSize
 		}
 		v := NewView(int(vsize))
-		lr := io.LimitedReader{R: r, N: vsize}
-		n, err := io.Copy(v, &lr)
+		lr.R = r
+		lr.N = vsize
+		n, err := io.Copy(v, lr)
 		b.Append(v)
 		done += n
 		if err == io.EOF {
@@ -572,7 +588,7 @@ func (b *Buffer) readByte() (byte, error) {
 	return bt, nil
 }
 
-// AsBufferReader returns the Buffer as a BufferReader capabable of io methods.
+// AsBufferReader returns the Buffer as a BufferReader capable of io methods.
 // The new BufferReader takes ownership of b.
 func (b *Buffer) AsBufferReader() BufferReader {
 	return BufferReader{b}
diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go
index 8b8e15ea..d57dfa02 100644
--- a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go
@@ -14,12 +14,16 @@
 
 package buffer
 
+import (
+	"context"
+)
+
 // saveData is invoked by stateify.
 func (b *Buffer) saveData() []byte {
 	return b.Flatten()
 }
 
 // loadData is invoked by stateify.
-func (b *Buffer) loadData(data []byte) {
+func (b *Buffer) loadData(_ context.Context, data []byte) {
 	*b = MakeWithData(data)
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go
index 7587787c..3e32338f 100644
--- a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go
@@ -3,6 +3,8 @@
 package buffer
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -28,12 +30,12 @@ func (b *Buffer) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &b.size)
 }
 
-func (b *Buffer) afterLoad() {}
+func (b *Buffer) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (b *Buffer) StateLoad(stateSourceObject state.Source) {
+func (b *Buffer) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(1, &b.size)
-	stateSourceObject.LoadValue(0, new([]byte), func(y any) { b.loadData(y.([]byte)) })
+	stateSourceObject.LoadValue(0, new([]byte), func(y any) { b.loadData(ctx, y.([]byte)) })
 }
 
 func (c *chunk) StateTypeName() string {
@@ -56,10 +58,10 @@ func (c *chunk) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &c.data)
 }
 
-func (c *chunk) afterLoad() {}
+func (c *chunk) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (c *chunk) StateLoad(stateSourceObject state.Source) {
+func (c *chunk) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &c.chunkRefs)
 	stateSourceObject.Load(1, &c.data)
 }
@@ -83,9 +85,9 @@ func (r *chunkRefs) StateSave(stateSinkObject state.Sink) {
 }
 
 // +checklocksignore
-func (r *chunkRefs) StateLoad(stateSourceObject state.Source) {
+func (r *chunkRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.refCount)
-	stateSourceObject.AfterLoad(r.afterLoad)
+	stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) })
 }
 
 func (v *View) StateTypeName() string {
@@ -110,67 +112,67 @@ func (v *View) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &v.chunk)
 }
 
-func (v *View) afterLoad() {}
+func (v *View) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (v *View) StateLoad(stateSourceObject state.Source) {
+func (v *View) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &v.read)
 	stateSourceObject.Load(1, &v.write)
 	stateSourceObject.Load(2, &v.chunk)
 }
 
-func (l *viewList) StateTypeName() string {
-	return "pkg/buffer.viewList"
+func (l *ViewList) StateTypeName() string {
+	return "pkg/buffer.ViewList"
 }
 
-func (l *viewList) StateFields() []string {
+func (l *ViewList) StateFields() []string {
 	return []string{
 		"head",
 		"tail",
 	}
 }
 
-func (l *viewList) beforeSave() {}
+func (l *ViewList) beforeSave() {}
 
 // +checklocksignore
-func (l *viewList) StateSave(stateSinkObject state.Sink) {
+func (l *ViewList) StateSave(stateSinkObject state.Sink) {
 	l.beforeSave()
 	stateSinkObject.Save(0, &l.head)
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *viewList) afterLoad() {}
+func (l *ViewList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *viewList) StateLoad(stateSourceObject state.Source) {
+func (l *ViewList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
 
-func (e *viewEntry) StateTypeName() string {
-	return "pkg/buffer.viewEntry"
+func (e *ViewEntry) StateTypeName() string {
+	return "pkg/buffer.ViewEntry"
 }
 
-func (e *viewEntry) StateFields() []string {
+func (e *ViewEntry) StateFields() []string {
 	return []string{
 		"next",
 		"prev",
 	}
 }
 
-func (e *viewEntry) beforeSave() {}
+func (e *ViewEntry) beforeSave() {}
 
 // +checklocksignore
-func (e *viewEntry) StateSave(stateSinkObject state.Sink) {
+func (e *ViewEntry) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 	stateSinkObject.Save(0, &e.next)
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *viewEntry) afterLoad() {}
+func (e *ViewEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *viewEntry) StateLoad(stateSourceObject state.Source) {
+func (e *ViewEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
@@ -180,6 +182,6 @@ func init() {
 	state.Register((*chunk)(nil))
 	state.Register((*chunkRefs)(nil))
 	state.Register((*View)(nil))
-	state.Register((*viewList)(nil))
-	state.Register((*viewEntry)(nil))
+	state.Register((*ViewList)(nil))
+	state.Register((*ViewEntry)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go
index 551f06db..a58eed02 100644
--- a/vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go
+++ b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go
@@ -27,7 +27,7 @@ const (
 	// number and passing the result to MostSignificantOne64.
 	baseChunkSizeLog2 = 6
 
-	// This is the size of the buffers in the first pool. Each subsquent pool
+	// This is the size of the buffers in the first pool. Each subsequent pool
 	// creates payloads 2^(pool index) times larger than the first pool's
 	// payloads.
 	baseChunkSize = 1 << baseChunkSizeLog2 // 64
@@ -87,9 +87,7 @@ func newChunk(size int) *chunk {
 	} else {
 		pool := getChunkPool(size)
 		c = pool.Get().(*chunk)
-		for i := range c.data {
-			c.data[i] = 0
-		}
+		clear(c.data)
 	}
 	c.InitRefs()
 	return c
diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go
index 4d2d3c3d..fa0606db 100644
--- a/vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go
+++ b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go
@@ -1,6 +1,7 @@
 package buffer
 
 import (
+	"context"
 	"fmt"
 
 	"gvisor.dev/gvisor/pkg/atomicbitops"
@@ -134,7 +135,7 @@ func (r *chunkRefs) DecRef(destroy func()) {
 	}
 }
 
-func (r *chunkRefs) afterLoad() {
+func (r *chunkRefs) afterLoad(context.Context) {
 	if r.ReadRefs() > 0 {
 		refs.Register(r)
 	}
diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/view.go b/vendor/gvisor.dev/gvisor/pkg/buffer/view.go
index d7eb2f11..6c8d17ef 100644
--- a/vendor/gvisor.dev/gvisor/pkg/buffer/view.go
+++ b/vendor/gvisor.dev/gvisor/pkg/buffer/view.go
@@ -48,7 +48,7 @@ var viewPool = sync.Pool{
 //
 // +stateify savable
 type View struct {
-	viewEntry `state:"nosave"`
+	ViewEntry `state:"nosave"`
 	read      int
 	write     int
 	chunk     *chunk
diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go b/vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go
index eb9aa9a7..db855dfd 100644
--- a/vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go
+++ b/vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go
@@ -6,14 +6,14 @@ package buffer
 // objects, if they are not the same. An ElementMapper is not typically
 // required if: Linker is left as is, Element is left as is, or Linker and
 // Element are the same type.
-type viewElementMapper struct{}
+type ViewElementMapper struct{}
 
 // linkerFor maps an Element to a Linker.
 //
 // This default implementation should be inlined.
 //
 //go:nosplit
-func (viewElementMapper) linkerFor(elem *View) *View { return elem }
+func (ViewElementMapper) linkerFor(elem *View) *View { return elem }
 
 // List is an intrusive list. Entries can be added to or removed from the list
 // in O(1) time and with no additional memory allocations.
@@ -27,13 +27,13 @@ func (viewElementMapper) linkerFor(elem *View) *View { return elem }
 //	}
 //
 // +stateify savable
-type viewList struct {
+type ViewList struct {
 	head *View
 	tail *View
 }
 
 // Reset resets list l to the empty state.
-func (l *viewList) Reset() {
+func (l *ViewList) Reset() {
 	l.head = nil
 	l.tail = nil
 }
@@ -41,21 +41,21 @@ func (l *viewList) Reset() {
 // Empty returns true iff the list is empty.
 //
 //go:nosplit
-func (l *viewList) Empty() bool {
+func (l *ViewList) Empty() bool {
 	return l.head == nil
 }
 
 // Front returns the first element of list l or nil.
 //
 //go:nosplit
-func (l *viewList) Front() *View {
+func (l *ViewList) Front() *View {
 	return l.head
 }
 
 // Back returns the last element of list l or nil.
 //
 //go:nosplit
-func (l *viewList) Back() *View {
+func (l *ViewList) Back() *View {
 	return l.tail
 }
 
@@ -64,8 +64,8 @@ func (l *viewList) Back() *View {
 // NOTE: This is an O(n) operation.
 //
 //go:nosplit
-func (l *viewList) Len() (count int) {
-	for e := l.Front(); e != nil; e = (viewElementMapper{}.linkerFor(e)).Next() {
+func (l *ViewList) Len() (count int) {
+	for e := l.Front(); e != nil; e = (ViewElementMapper{}.linkerFor(e)).Next() {
 		count++
 	}
 	return count
@@ -74,12 +74,12 @@ func (l *viewList) Len() (count int) {
 // PushFront inserts the element e at the front of list l.
 //
 //go:nosplit
-func (l *viewList) PushFront(e *View) {
-	linker := viewElementMapper{}.linkerFor(e)
+func (l *ViewList) PushFront(e *View) {
+	linker := ViewElementMapper{}.linkerFor(e)
 	linker.SetNext(l.head)
 	linker.SetPrev(nil)
 	if l.head != nil {
-		viewElementMapper{}.linkerFor(l.head).SetPrev(e)
+		ViewElementMapper{}.linkerFor(l.head).SetPrev(e)
 	} else {
 		l.tail = e
 	}
@@ -90,13 +90,13 @@ func (l *viewList) PushFront(e *View) {
 // PushFrontList inserts list m at the start of list l, emptying m.
 //
 //go:nosplit
-func (l *viewList) PushFrontList(m *viewList) {
+func (l *ViewList) PushFrontList(m *ViewList) {
 	if l.head == nil {
 		l.head = m.head
 		l.tail = m.tail
 	} else if m.head != nil {
-		viewElementMapper{}.linkerFor(l.head).SetPrev(m.tail)
-		viewElementMapper{}.linkerFor(m.tail).SetNext(l.head)
+		ViewElementMapper{}.linkerFor(l.head).SetPrev(m.tail)
+		ViewElementMapper{}.linkerFor(m.tail).SetNext(l.head)
 
 		l.head = m.head
 	}
@@ -107,12 +107,12 @@ func (l *viewList) PushFrontList(m *viewList) {
 // PushBack inserts the element e at the back of list l.
 //
 //go:nosplit
-func (l *viewList) PushBack(e *View) {
-	linker := viewElementMapper{}.linkerFor(e)
+func (l *ViewList) PushBack(e *View) {
+	linker := ViewElementMapper{}.linkerFor(e)
 	linker.SetNext(nil)
 	linker.SetPrev(l.tail)
 	if l.tail != nil {
-		viewElementMapper{}.linkerFor(l.tail).SetNext(e)
+		ViewElementMapper{}.linkerFor(l.tail).SetNext(e)
 	} else {
 		l.head = e
 	}
@@ -123,13 +123,13 @@ func (l *viewList) PushBack(e *View) {
 // PushBackList inserts list m at the end of list l, emptying m.
 //
 //go:nosplit
-func (l *viewList) PushBackList(m *viewList) {
+func (l *ViewList) PushBackList(m *ViewList) {
 	if l.head == nil {
 		l.head = m.head
 		l.tail = m.tail
 	} else if m.head != nil {
-		viewElementMapper{}.linkerFor(l.tail).SetNext(m.head)
-		viewElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+		ViewElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+		ViewElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
 
 		l.tail = m.tail
 	}
@@ -140,9 +140,9 @@ func (l *viewList) PushBackList(m *viewList) {
 // InsertAfter inserts e after b.
 //
 //go:nosplit
-func (l *viewList) InsertAfter(b, e *View) {
-	bLinker := viewElementMapper{}.linkerFor(b)
-	eLinker := viewElementMapper{}.linkerFor(e)
+func (l *ViewList) InsertAfter(b, e *View) {
+	bLinker := ViewElementMapper{}.linkerFor(b)
+	eLinker := ViewElementMapper{}.linkerFor(e)
 
 	a := bLinker.Next()
 
@@ -151,7 +151,7 @@ func (l *viewList) InsertAfter(b, e *View) {
 	bLinker.SetNext(e)
 
 	if a != nil {
-		viewElementMapper{}.linkerFor(a).SetPrev(e)
+		ViewElementMapper{}.linkerFor(a).SetPrev(e)
 	} else {
 		l.tail = e
 	}
@@ -160,9 +160,9 @@ func (l *viewList) InsertAfter(b, e *View) {
 // InsertBefore inserts e before a.
 //
 //go:nosplit
-func (l *viewList) InsertBefore(a, e *View) {
-	aLinker := viewElementMapper{}.linkerFor(a)
-	eLinker := viewElementMapper{}.linkerFor(e)
+func (l *ViewList) InsertBefore(a, e *View) {
+	aLinker := ViewElementMapper{}.linkerFor(a)
+	eLinker := ViewElementMapper{}.linkerFor(e)
 
 	b := aLinker.Prev()
 	eLinker.SetNext(a)
@@ -170,7 +170,7 @@ func (l *viewList) InsertBefore(a, e *View) {
 	aLinker.SetPrev(e)
 
 	if b != nil {
-		viewElementMapper{}.linkerFor(b).SetNext(e)
+		ViewElementMapper{}.linkerFor(b).SetNext(e)
 	} else {
 		l.head = e
 	}
@@ -179,19 +179,19 @@ func (l *viewList) InsertBefore(a, e *View) {
 // Remove removes e from l.
 //
 //go:nosplit
-func (l *viewList) Remove(e *View) {
-	linker := viewElementMapper{}.linkerFor(e)
+func (l *ViewList) Remove(e *View) {
+	linker := ViewElementMapper{}.linkerFor(e)
 	prev := linker.Prev()
 	next := linker.Next()
 
 	if prev != nil {
-		viewElementMapper{}.linkerFor(prev).SetNext(next)
+		ViewElementMapper{}.linkerFor(prev).SetNext(next)
 	} else if l.head == e {
 		l.head = next
 	}
 
 	if next != nil {
-		viewElementMapper{}.linkerFor(next).SetPrev(prev)
+		ViewElementMapper{}.linkerFor(next).SetPrev(prev)
 	} else if l.tail == e {
 		l.tail = prev
 	}
@@ -205,7 +205,7 @@ func (l *viewList) Remove(e *View) {
 // methods needed by List.
 //
 // +stateify savable
-type viewEntry struct {
+type ViewEntry struct {
 	next *View
 	prev *View
 }
@@ -213,27 +213,27 @@ type viewEntry struct {
 // Next returns the entry that follows e in the list.
 //
 //go:nosplit
-func (e *viewEntry) Next() *View {
+func (e *ViewEntry) Next() *View {
 	return e.next
 }
 
 // Prev returns the entry that precedes e in the list.
 //
 //go:nosplit
-func (e *viewEntry) Prev() *View {
+func (e *ViewEntry) Prev() *View {
 	return e.prev
 }
 
 // SetNext assigns 'entry' as the entry that follows e in the list.
 //
 //go:nosplit
-func (e *viewEntry) SetNext(elem *View) {
+func (e *ViewEntry) SetNext(elem *View) {
 	e.next = elem
 }
 
 // SetPrev assigns 'entry' as the entry that precedes e in the list.
 //
 //go:nosplit
-func (e *viewEntry) SetPrev(elem *View) {
+func (e *ViewEntry) SetPrev(elem *View) {
 	e.prev = elem
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go
index 413b8493..df5acf67 100644
--- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go
+++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go
@@ -38,7 +38,7 @@ import (
 	"gvisor.dev/gvisor/pkg/sync"
 )
 
-// contextID is the package for context.Context.Value keys.
+// contextID is the package for anyContext.Context.Value keys.
 type contextID int
 
 const (
@@ -51,13 +51,13 @@ const (
 	_AT_HWCAP2 = 26
 )
 
-// context represents context.Context.
-type context interface {
+// anyContext represents context.Context.
+type anyContext interface {
 	Value(key any) any
 }
 
 // FromContext returns the FeatureSet from the context, if available.
-func FromContext(ctx context) FeatureSet {
+func FromContext(ctx anyContext) FeatureSet {
 	v := ctx.Value(CtxFeatureSet)
 	if v == nil {
 		return FeatureSet{} // Panics if used.
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go
index 044eed07..f444210c 100644
--- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go
+++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go
@@ -18,6 +18,7 @@
 package cpuid
 
 import (
+	"context"
 	"fmt"
 	"io"
 )
@@ -56,7 +57,7 @@ func (fs *FeatureSet) saveFunction() Static {
 }
 
 // loadFunction saves the function as a static query.
-func (fs *FeatureSet) loadFunction(s Static) {
+func (fs *FeatureSet) loadFunction(_ context.Context, s Static) {
 	fs.Function = s
 }
 
@@ -308,8 +309,8 @@ func (fs FeatureSet) HasFeature(feature Feature) bool {
 // WriteCPUInfoTo is to generate a section of one cpu in /proc/cpuinfo. This is
 // a minimal /proc/cpuinfo, it is missing some fields like "microcode" that are
 // not always printed in Linux. The bogomips field is simply made up.
-func (fs FeatureSet) WriteCPUInfoTo(cpu uint, w io.Writer) {
-	// Avoid many redunant calls here, since this can occasionally appear
+func (fs FeatureSet) WriteCPUInfoTo(cpu, numCPU uint, w io.Writer) {
+	// Avoid many redundant calls here, since this can occasionally appear
 	// in the hot path. Read all basic information up front, see above.
 	ax, _, _, _ := fs.query(featureInfo)
 	ef, em, _, f, m, _ := signatureSplit(ax)
@@ -321,6 +322,12 @@ func (fs FeatureSet) WriteCPUInfoTo(cpu uint, w io.Writer) {
 	fmt.Fprintf(w, "model name\t: %s\n", "unknown") // Unknown for now.
 	fmt.Fprintf(w, "stepping\t: %s\n", "unknown")   // Unknown for now.
 	fmt.Fprintf(w, "cpu MHz\t\t: %.3f\n", cpuFreqMHz)
+	fmt.Fprintf(w, "physical id\t: 0\n") // Pretend all CPUs are in the same socket.
+	fmt.Fprintf(w, "siblings\t: %d\n", numCPU)
+	fmt.Fprintf(w, "core id\t\t: %d\n", cpu)
+	fmt.Fprintf(w, "cpu cores\t: %d\n", numCPU) // Pretend each CPU is a distinct core (rather than a hyperthread).
+	fmt.Fprintf(w, "apicid\t\t: %d\n", cpu)
+	fmt.Fprintf(w, "initial apicid\t: %d\n", cpu)
 	fmt.Fprintf(w, "fpu\t\t: yes\n")
 	fmt.Fprintf(w, "fpu_exception\t: yes\n")
 	fmt.Fprintf(w, "cpuid level\t: %d\n", uint32(xSaveInfo)) // Same as ax in vendorID.
@@ -361,8 +368,22 @@ func (fs FeatureSet) Intel() bool {
 // If xSaveInfo isn't supported, cpuid will not fault but will
 // return bogus values.
 var (
-	xsaveSize    = native(In{Eax: uint32(xSaveInfo)}).Ebx
-	maxXsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ecx
+	xsaveSize       = native(In{Eax: uint32(xSaveInfo)}).Ebx
+	maxXsaveSize    = native(In{Eax: uint32(xSaveInfo)}).Ecx
+	amxTileCfgSize  = native(In{Eax: uint32(xSaveInfo), Ecx: 17}).Eax
+	amxTileDataSize = native(In{Eax: uint32(xSaveInfo), Ecx: 18}).Eax
+)
+
+const (
+	// XCR0AMXMask are the bits that enable xsave to operate on AMX TILECFG
+	// and TILEDATA.
+	//
+	// Note: TILECFG and TILEDATA are always either both enabled or both
+	//       disabled.
+	//
+	// See Intel® 64 and IA-32 Architectures Software Developer’s Manual Vol.1
+	// section 13.3 for details.
+	XCR0AMXMask = uint64((1 << 17) | (1 << 18))
 )
 
 // ExtendedStateSize returns the number of bytes needed to save the "extended
@@ -384,15 +405,30 @@ func (fs FeatureSet) ExtendedStateSize() (size, align uint) {
 	return 512, 16
 }
 
+// AMXExtendedStateSize returns the number of bytes within the "extended state"
+// area that is used for AMX.
+func (fs FeatureSet) AMXExtendedStateSize() uint {
+	if fs.UseXsave() {
+		xcr0 := xgetbv(0)
+		if (xcr0 & XCR0AMXMask) != 0 {
+			return uint(amxTileCfgSize + amxTileDataSize)
+		}
+	}
+	return 0
+}
+
 // ValidXCR0Mask returns the valid bits in control register XCR0.
 //
+// Always exclude AMX bits, because we do not support it.
+// TODO(gvisor.dev/issues/9896): Implement AMX Support.
+//
 //go:nosplit
 func (fs FeatureSet) ValidXCR0Mask() uint64 {
 	if !fs.HasFeature(X86FeatureXSAVE) {
 		return 0
 	}
 	ax, _, _, dx := fs.query(xSaveInfo)
-	return uint64(dx)<<32 | uint64(ax)
+	return (uint64(dx)<<32 | uint64(ax)) &^ XCR0AMXMask
 }
 
 // UseXsave returns the choice of fp state saving instruction.
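
For reference, the AND NOT masking that ValidXCR0Mask now applies can be exercised in a standalone sketch; only the XCR0AMXMask value (bits 17 and 18) is taken from the change above, the rest is illustrative:

package main

import "fmt"

// xcr0AMXMask mirrors XCR0AMXMask above: bit 17 (TILECFG) and bit 18 (TILEDATA).
const xcr0AMXMask = uint64((1 << 17) | (1 << 18))

func main() {
	// Suppose the CPU reports x87, SSE, AVX plus the two AMX components.
	reported := uint64(0x1|0x2|0x4) | xcr0AMXMask
	// Clearing the AMX bits with AND NOT is what ValidXCR0Mask does above.
	fmt.Printf("%#x\n", reported&^xcr0AMXMask) // 0x7
}
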
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go
index 8b7c0e1b..bb416970 100644
--- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go
@@ -6,6 +6,8 @@
 package cpuid
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -31,12 +33,12 @@ func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &fs.hwCap)
 }
 
-func (fs *FeatureSet) afterLoad() {}
+func (fs *FeatureSet) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (fs *FeatureSet) StateLoad(stateSourceObject state.Source) {
+func (fs *FeatureSet) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(1, &fs.hwCap)
-	stateSourceObject.LoadValue(0, new(Static), func(y any) { fs.loadFunction(y.(Static)) })
+	stateSourceObject.LoadValue(0, new(Static), func(y any) { fs.loadFunction(ctx, y.(Static)) })
 }
 
 func (i *In) StateTypeName() string {
@@ -59,10 +61,10 @@ func (i *In) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &i.Ecx)
 }
 
-func (i *In) afterLoad() {}
+func (i *In) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *In) StateLoad(stateSourceObject state.Source) {
+func (i *In) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.Eax)
 	stateSourceObject.Load(1, &i.Ecx)
 }
@@ -91,10 +93,10 @@ func (o *Out) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(3, &o.Edx)
 }
 
-func (o *Out) afterLoad() {}
+func (o *Out) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (o *Out) StateLoad(stateSourceObject state.Source) {
+func (o *Out) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &o.Eax)
 	stateSourceObject.Load(1, &o.Ebx)
 	stateSourceObject.Load(2, &o.Ecx)
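
The same mechanical change repeats in every *_state_autogen.go file touched by this patch: afterLoad and StateLoad now take a context.Context, which is threaded into the LoadValue callbacks. A minimal sketch of the shape, using a hypothetical type rather than the real state.Source/state.Sink API:

package main

import (
	"context"
	"fmt"
)

// widget stands in for any stateify-savable type.
type widget struct{ v string }

// afterLoad and loadValue now receive the restore context, matching the
// generated code above.
func (w *widget) afterLoad(context.Context) {}

func (w *widget) loadValue(_ context.Context, v string) { w.v = v }

func (w *widget) stateLoad(ctx context.Context, v string) {
	w.loadValue(ctx, v)
	w.afterLoad(ctx)
}

func main() {
	var w widget
	w.stateLoad(context.Background(), "restored")
	fmt.Println(w.v)
}
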
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64.go
index 7a22e98b..964f33ac 100644
--- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64.go
+++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64.go
@@ -92,7 +92,7 @@ func (fs FeatureSet) HasFeature(feature Feature) bool {
 
 // WriteCPUInfoTo is to generate a section of one cpu in /proc/cpuinfo. This is
 // a minimal /proc/cpuinfo, and the bogomips field is simply made up.
-func (fs FeatureSet) WriteCPUInfoTo(cpu uint, w io.Writer) {
+func (fs FeatureSet) WriteCPUInfoTo(cpu, numCPU uint, w io.Writer) {
 	fmt.Fprintf(w, "processor\t: %d\n", cpu)
 	fmt.Fprintf(w, "BogoMIPS\t: %.02f\n", fs.cpuFreqMHz) // It's bogus anyway.
 	fmt.Fprintf(w, "Features\t\t: %s\n", fs.FlagString())
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64_state_autogen.go
index 48699f70..1d7f9334 100644
--- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64_state_autogen.go
@@ -6,6 +6,8 @@
 package cpuid
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -39,10 +41,10 @@ func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(6, &fs.cpuRevDec)
 }
 
-func (fs *FeatureSet) afterLoad() {}
+func (fs *FeatureSet) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (fs *FeatureSet) StateLoad(stateSourceObject state.Source) {
+func (fs *FeatureSet) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &fs.hwCap)
 	stateSourceObject.Load(1, &fs.cpuFreqMHz)
 	stateSourceObject.Load(2, &fs.cpuImplHex)
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_state_autogen.go
index b2fcd970..d873d007 100644
--- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_state_autogen.go
@@ -3,6 +3,8 @@
 package cpuid
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -26,10 +28,10 @@ func (h *hwCap) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &h.hwCap2)
 }
 
-func (h *hwCap) afterLoad() {}
+func (h *hwCap) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (h *hwCap) StateLoad(stateSourceObject state.Source) {
+func (h *hwCap) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &h.hwCap1)
 	stateSourceObject.Load(1, &h.hwCap2)
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/features_amd64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/features_amd64.go
index f14b0baf..4831fda3 100644
--- a/vendor/gvisor.dev/gvisor/pkg/cpuid/features_amd64.go
+++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/features_amd64.go
@@ -127,6 +127,14 @@ func (f Feature) set(s ChangeableSet, on bool) {
 			}
 		}
 		s.Set(In{Eax: uint32(extendedFeatures)}, out)
+	case 7:
+		out := s.Query(In{Eax: uint32(extendedFeatureInfo)})
+		if on {
+			out.Edx |= f.bit()
+		} else {
+			out.Edx &^= f.bit()
+		}
+		s.Set(In{Eax: uint32(extendedFeatureInfo)}, out)
 	}
 }
 
@@ -170,6 +178,9 @@ func (f Feature) check(fs FeatureSet) bool {
 			return ((dx &^ block6DuplicateMask) & f.bit()) != 0
 		}
 		return false
+	case 7:
+		_, _, _, dx := fs.query(extendedFeatureInfo)
+		return (dx & f.bit()) != 0
 	default:
 		return false
 	}
@@ -389,6 +400,43 @@ const (
 	X86Feature3DNOW    Feature = 6*32 + 31
 )
 
+// Block 7 constants are the extended features bits in
+// CPUID.(EAX=07H,ECX=0):EDX.
+const (
+	_ Feature = 7*32 + iota // edx bit 0 is reserved.
+	_                       // edx bit 1 is reserved.
+	X86FeatureAVX512_4VNNIW
+	X86FeatureAVX512_4FMAPS
+	X86FeatureFSRM
+	_ // edx bit 5 is not used in Linux.
+	_ // edx bit 6 is reserved.
+	_ // edx bit 7 is reserved.
+	X86FeatureAVX512_VP2INTERSECT
+	X86FeatureSRBDS_CTRL
+	X86FeatureMD_CLEAR
+	X86FeatureRTM_ALWAYS_ABORT
+	_ // edx bit 12 is reserved.
+	X86FeatureTSX_FORCE_ABORT
+	X86FeatureSERIALIZE
+	X86FeatureHYBRID_CPU
+	X86FeatureTSXLDTRK
+	_ // edx bit 17 is reserved.
+	X86FeaturePCONFIG
+	X86FeatureARCH_LBR
+	X86FeatureIBT
+	_ // edx bit 21 is reserved.
+	X86FeatureAMX_BF16
+	X86FeatureAVX512_FP16
+	X86FeatureAMX_TILE
+	X86FeatureAMX_INT8
+	X86FeatureSPEC_CTRL
+	X86FeatureINTEL_STIBP
+	X86FeatureFLUSH_L1D
+	X86FeatureARCH_CAPABILITIES
+	X86FeatureCORE_CAPABILITIES
+	X86FeatureSPEC_CTRL_SSBD
+)
+
 // These are the extended floating point state features. They are used to
 // enumerate floating point features in XCR0, XSTATE_BV, etc.
 const (
@@ -569,6 +617,32 @@ var allFeatures = map[Feature]allFeatureInfo{
 	X86FeatureLM:       {"lm", true},
 	X86Feature3DNOWEXT: {"3dnowext", true},
 	X86Feature3DNOW:    {"3dnow", true},
+
+	// Block 7.
+	X86FeatureAVX512_4VNNIW:       {"avx512_4vnniw", true},
+	X86FeatureAVX512_4FMAPS:       {"avx512_4fmaps", true},
+	X86FeatureFSRM:                {"fsrm", true},
+	X86FeatureAVX512_VP2INTERSECT: {"avx512_vp2intersect", true},
+	X86FeatureSRBDS_CTRL:          {"srbds_ctrl", false},
+	X86FeatureMD_CLEAR:            {"md_clear", true},
+	X86FeatureRTM_ALWAYS_ABORT:    {"rtm_always_abort", false},
+	X86FeatureTSX_FORCE_ABORT:     {"tsx_force_abort", false},
+	X86FeatureSERIALIZE:           {"serialize", true},
+	X86FeatureHYBRID_CPU:          {"hybrid_cpu", false},
+	X86FeatureTSXLDTRK:            {"tsxldtrk", true},
+	X86FeaturePCONFIG:             {"pconfig", true},
+	X86FeatureARCH_LBR:            {"arch_lbr", true},
+	X86FeatureIBT:                 {"ibt", true},
+	X86FeatureAMX_BF16:            {"amx_bf16", true},
+	X86FeatureAVX512_FP16:         {"avx512_fp16", true},
+	X86FeatureAMX_TILE:            {"amx_tile", true},
+	X86FeatureAMX_INT8:            {"amx_int8", true},
+	X86FeatureSPEC_CTRL:           {"spec_ctrl", false},
+	X86FeatureINTEL_STIBP:         {"intel_stibp", false},
+	X86FeatureFLUSH_L1D:           {"flush_l1d", true},
+	X86FeatureARCH_CAPABILITIES:   {"arch_capabilities", true},
+	X86FeatureCORE_CAPABILITIES:   {"core_capabilities", false},
+	X86FeatureSPEC_CTRL_SSBD:      {"spec_ctrl_ssbd", false},
 }
 
 // linuxBlockOrder defines the order in which linux organizes the feature
@@ -576,7 +650,7 @@ var allFeatures = map[Feature]allFeatureInfo{
 // which doesn't match well here, so for the /proc/cpuinfo generation we simply
 // re-map the blocks to Linux's ordering and then go through the bits in each
 // block.
-var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4, 3}
+var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4, 3, 7}
 
 func archFlagOrder(fn func(Feature)) {
 	for _, b := range linuxBlockOrder {
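
The new block 7 constants follow the package's block*32+bit encoding, with block 7 read from CPUID.(EAX=07H,ECX=0):EDX as wired up in set() and check() above. A standalone sketch of that arithmetic (block and bit here are stand-ins for the unexported helpers):

package main

import "fmt"

// Feature mimics the block*32+bit encoding of the constants above.
type Feature int

func (f Feature) block() int  { return int(f) / 32 }
func (f Feature) bit() uint32 { return uint32(1) << (uint(f) % 32) }

// X86FeatureSERIALIZE sits in block 7, bit 14 of EDX.
const X86FeatureSERIALIZE Feature = 7*32 + 14

func main() {
	fmt.Println(X86FeatureSERIALIZE.block())       // 7
	fmt.Printf("%#x\n", X86FeatureSERIALIZE.bit()) // 0x4000
}
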
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.go
index eaf77511..ac2fcbbc 100644
--- a/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.go
+++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.go
@@ -215,6 +215,9 @@ func readMaxCPUFreq() {
 
 }
 
+// xgetbv reads an extended control register.
+func xgetbv(reg uintptr) uint64
+
 // archInitialize initializes hostFeatureSet.
 func archInitialize() {
 	hostFeatureSet = FeatureSet{
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.s b/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.s
index dd21b4bd..04a1433a 100644
--- a/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.s
+++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.s
@@ -23,3 +23,16 @@ TEXT ·native(SB),NOSPLIT|NOFRAME,$0-24
 	MOVL CX, ret_Ecx+16(FP)
 	MOVL DX, ret_Edx+20(FP)
 	RET
+
+// xgetbv reads an extended control register.
+//
+// The code corresponds to:
+//
+// 	xgetbv
+//
+TEXT ·xgetbv(SB),NOSPLIT|NOFRAME,$0-16
+	MOVQ reg+0(FP), CX
+	BYTE $0x0f; BYTE $0x01; BYTE $0xd0;
+	MOVL AX, ret+8(FP)
+	MOVL DX, ret+12(FP)
+	RET
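
XGETBV leaves the 64-bit XCR value split across EDX:EAX, and the two 32-bit stores above land in the low and high halves of the return slot, so the Go declaration of xgetbv sees a single uint64. The equivalent combination in plain Go, purely for illustration:

package main

import "fmt"

// combine assembles an EDX:EAX register pair into one 64-bit value.
func combine(edx, eax uint32) uint64 {
	return uint64(edx)<<32 | uint64(eax)
}

func main() {
	// e.g. an XCR0 with x87, SSE, AVX and the AVX-512 state components set.
	fmt.Printf("%#x\n", combine(0, 0xe7)) // 0xe7
}
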
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/static_amd64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/static_amd64.go
index 09bcf16b..f21f2e4f 100644
--- a/vendor/gvisor.dev/gvisor/pkg/cpuid/static_amd64.go
+++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/static_amd64.go
@@ -17,6 +17,8 @@
 
 package cpuid
 
+import "context"
+
 // Static is a static CPUID function.
 //
 // +stateify savable
@@ -90,7 +92,7 @@ func (s Static) ToFeatureSet() FeatureSet {
 }
 
 // afterLoad calls normalize.
-func (s Static) afterLoad() {
+func (s Static) afterLoad(context.Context) {
 	s.normalize()
 }
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/goid/goid_amd64.s b/vendor/gvisor.dev/gvisor/pkg/goid/goid_122_amd64.s
similarity index 97%
rename from vendor/gvisor.dev/gvisor/pkg/goid/goid_amd64.s
rename to vendor/gvisor.dev/gvisor/pkg/goid/goid_122_amd64.s
index 8d0e9b31..5039f73f 100644
--- a/vendor/gvisor.dev/gvisor/pkg/goid/goid_amd64.s
+++ b/vendor/gvisor.dev/gvisor/pkg/goid/goid_122_amd64.s
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !go1.23
+
 #include "textflag.h"
 
 #define GOID_OFFSET 152 // +checkoffset runtime g.goid
diff --git a/vendor/gvisor.dev/gvisor/pkg/goid/goid_arm64.s b/vendor/gvisor.dev/gvisor/pkg/goid/goid_122_arm64.s
similarity index 97%
rename from vendor/gvisor.dev/gvisor/pkg/goid/goid_arm64.s
rename to vendor/gvisor.dev/gvisor/pkg/goid/goid_122_arm64.s
index 07b04a24..ec59b4be 100644
--- a/vendor/gvisor.dev/gvisor/pkg/goid/goid_arm64.s
+++ b/vendor/gvisor.dev/gvisor/pkg/goid/goid_122_arm64.s
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !go1.23
+
 #include "textflag.h"
 
 #define GOID_OFFSET 152 // +checkoffset runtime g.goid
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_global_state.go b/vendor/gvisor.dev/gvisor/pkg/goid/goid_123_amd64.s
similarity index 67%
rename from vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_global_state.go
rename to vendor/gvisor.dev/gvisor/pkg/goid/goid_123_amd64.s
index dfec4258..9f53a4e9 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_global_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/goid/goid_123_amd64.s
@@ -1,4 +1,4 @@
-// Copyright 2018 The gVisor Authors.
+// Copyright 2020 The gVisor Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,8 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package stack
+//go:build go1.23
 
-// StackFromEnv is the global stack created in restore run.
-// FIXME(b/36201077)
-var StackFromEnv *Stack
+#include "textflag.h"
+
+#define GOID_OFFSET 160 // +checkoffset runtime g.goid
+
+// func goid() int64
+TEXT ·goid(SB),NOSPLIT|NOFRAME,$0-8
+  MOVQ (TLS), R14
+  MOVQ GOID_OFFSET(R14), R14
+  MOVQ R14, ret+0(FP)
+  RET
diff --git a/vendor/gvisor.dev/gvisor/pkg/goid/goid_123_arm64.s b/vendor/gvisor.dev/gvisor/pkg/goid/goid_123_arm64.s
new file mode 100644
index 00000000..08d70578
--- /dev/null
+++ b/vendor/gvisor.dev/gvisor/pkg/goid/goid_123_arm64.s
@@ -0,0 +1,26 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.23
+
+#include "textflag.h"
+
+#define GOID_OFFSET 160 // +checkoffset runtime g.goid
+
+// func goid() int64
+TEXT ·goid(SB),NOSPLIT,$0-8
+        MOVD g, R0      // g
+        MOVD GOID_OFFSET(R0), R0
+        MOVD R0, ret+0(FP)
+        RET
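
The goid assembly is now split per Go release with build constraints, because the runtime's g struct layout changed and g.goid moved from offset 152 to offset 160 in Go 1.23. A toy illustration of the constraint mechanism (hypothetical file, not part of the patch; builds only with Go 1.23 or newer):

//go:build go1.23

// A sibling file tagged "//go:build !go1.23" would carry the pre-1.23 value,
// mirroring the goid_122_*.s / goid_123_*.s pair above.
package main

import "fmt"

const goidOffset = 160 // runtime g.goid offset asserted for Go 1.23 above

func main() {
	fmt.Println("g.goid offset:", goidOffset)
}
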
diff --git a/vendor/gvisor.dev/gvisor/pkg/log/json.go b/vendor/gvisor.dev/gvisor/pkg/log/json.go
index a7f55a9f..a57bc101 100644
--- a/vendor/gvisor.dev/gvisor/pkg/log/json.go
+++ b/vendor/gvisor.dev/gvisor/pkg/log/json.go
@@ -17,6 +17,8 @@ package log
 import (
 	"encoding/json"
 	"fmt"
+	"runtime"
+	"strings"
 	"time"
 )
 
@@ -62,9 +64,16 @@ type JSONEmitter struct {
 }
 
 // Emit implements Emitter.Emit.
-func (e JSONEmitter) Emit(_ int, level Level, timestamp time.Time, format string, v ...any) {
+func (e JSONEmitter) Emit(depth int, level Level, timestamp time.Time, format string, v ...any) {
+	logLine := fmt.Sprintf(format, v...)
+	if _, file, line, ok := runtime.Caller(depth + 1); ok {
+		if slash := strings.LastIndexByte(file, byte('/')); slash >= 0 {
+			file = file[slash+1:] // Trim any directory path from the file.
+		}
+		logLine = fmt.Sprintf("%s:%d] %s", file, line, logLine)
+	}
 	j := jsonLog{
-		Msg:   fmt.Sprintf(format, v...),
+		Msg:   logLine,
 		Level: level,
 		Time:  timestamp,
 	}
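
Emit now prefixes each message with its call site: runtime.Caller(depth+1) skips the emitter's own frame, and everything up to the last path separator is trimmed so only the file name remains. A standalone sketch of the same prefixing, not gVisor code:

package main

import (
	"fmt"
	"runtime"
	"strings"
)

// prefixCaller prepends "file.go:NN] " to msg, looking depth+1 frames up.
func prefixCaller(depth int, msg string) string {
	if _, file, line, ok := runtime.Caller(depth + 1); ok {
		if slash := strings.LastIndexByte(file, '/'); slash >= 0 {
			file = file[slash+1:] // Trim the directory path.
		}
		return fmt.Sprintf("%s:%d] %s", file, line, msg)
	}
	return msg
}

func main() {
	fmt.Println(prefixCaller(0, "hello")) // e.g. "main.go:21] hello"
}
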
diff --git a/vendor/gvisor.dev/gvisor/pkg/log/json_k8s.go b/vendor/gvisor.dev/gvisor/pkg/log/json_k8s.go
index 0105c068..8f5aab5a 100644
--- a/vendor/gvisor.dev/gvisor/pkg/log/json_k8s.go
+++ b/vendor/gvisor.dev/gvisor/pkg/log/json_k8s.go
@@ -17,6 +17,8 @@ package log
 import (
 	"encoding/json"
 	"fmt"
+	"runtime"
+	"strings"
 	"time"
 )
 
@@ -33,9 +35,16 @@ type K8sJSONEmitter struct {
 }
 
 // Emit implements Emitter.Emit.
-func (e K8sJSONEmitter) Emit(_ int, level Level, timestamp time.Time, format string, v ...any) {
+func (e K8sJSONEmitter) Emit(depth int, level Level, timestamp time.Time, format string, v ...any) {
+	logLine := fmt.Sprintf(format, v...)
+	if _, file, line, ok := runtime.Caller(depth + 1); ok {
+		if slash := strings.LastIndexByte(file, byte('/')); slash >= 0 {
+			file = file[slash+1:] // Trim any directory path from the file.
+		}
+		logLine = fmt.Sprintf("%s:%d] %s", file, line, logLine)
+	}
 	j := k8sJSONLog{
-		Log:   fmt.Sprintf(format, v...),
+		Log:   logLine,
 		Level: level,
 		Time:  timestamp,
 	}
diff --git a/vendor/gvisor.dev/gvisor/pkg/log/log.go b/vendor/gvisor.dev/gvisor/pkg/log/log.go
index af95fb32..581aa77c 100644
--- a/vendor/gvisor.dev/gvisor/pkg/log/log.go
+++ b/vendor/gvisor.dev/gvisor/pkg/log/log.go
@@ -250,11 +250,11 @@ func (l *BasicLogger) SetLevel(level Level) {
 var logMu sync.Mutex
 
 // log is the default logger.
-var log atomic.Value
+var log atomic.Pointer[BasicLogger]
 
 // Log retrieves the global logger.
 func Log() *BasicLogger {
-	return log.Load().(*BasicLogger)
+	return log.Load()
 }
 
 // SetTarget sets the log target.
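
Switching the global logger from atomic.Value to the generic atomic.Pointer[BasicLogger] drops the interface boxing and the type assertion on every Load. The pattern in isolation (toy type, Go 1.19+):

package main

import (
	"fmt"
	"sync/atomic"
)

type logger struct{ name string }

// current is a typed atomic pointer; Load returns *logger directly.
var current atomic.Pointer[logger]

func main() {
	current.Store(&logger{name: "default"})
	fmt.Println(current.Load().name) // no .(*logger) assertion needed
}
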
diff --git a/vendor/gvisor.dev/gvisor/pkg/rand/rand.go b/vendor/gvisor.dev/gvisor/pkg/rand/rand.go
index be0e85fd..94d2764d 100644
--- a/vendor/gvisor.dev/gvisor/pkg/rand/rand.go
+++ b/vendor/gvisor.dev/gvisor/pkg/rand/rand.go
@@ -15,8 +15,6 @@
 //go:build !linux
 // +build !linux
 
-// Package rand implements a cryptographically secure pseudorandom number
-// generator.
 package rand
 
 import "crypto/rand"
diff --git a/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go b/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go
index fd5fa5d6..0913e8b0 100644
--- a/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go
+++ b/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go
@@ -12,8 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package rand implements a cryptographically secure pseudorandom number
-// generator.
 package rand
 
 import (
diff --git a/vendor/gvisor.dev/gvisor/pkg/rand/rng.go b/vendor/gvisor.dev/gvisor/pkg/rand/rng.go
new file mode 100644
index 00000000..ac2d0f8d
--- /dev/null
+++ b/vendor/gvisor.dev/gvisor/pkg/rand/rng.go
@@ -0,0 +1,131 @@
+// Copyright 2023 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rand implements a cryptographically secure pseudorandom number
+// generator.
+package rand
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+// RNG exposes convenience functions based on a cryptographically secure
+// io.Reader.
+type RNG struct {
+	Reader io.Reader
+}
+
+// RNGFrom returns a new RNG. r must be a cryptographically secure io.Reader.
+func RNGFrom(r io.Reader) RNG {
+	return RNG{Reader: r}
+}
+
+// Uint16 is analogous to the standard library's math/rand.Uint16.
+func (rg *RNG) Uint16() uint16 {
+	var data [2]byte
+	if _, err := rg.Reader.Read(data[:]); err != nil {
+		panic(fmt.Sprintf("Read() failed: %v", err))
+	}
+	return binary.NativeEndian.Uint16(data[:])
+}
+
+// Uint32 is analogous to the standard library's math/rand.Uint32.
+func (rg *RNG) Uint32() uint32 {
+	var data [4]byte
+	if _, err := rg.Reader.Read(data[:]); err != nil {
+		panic(fmt.Sprintf("Read() failed: %v", err))
+	}
+	return binary.NativeEndian.Uint32(data[:])
+}
+
+// Int63n is analogous to the standard library's math/rand.Int63n.
+func (rg *RNG) Int63n(n int64) int64 {
+	// Based on Go's rand package implementation, but using
+	// cryptographically secure random numbers.
+	if n <= 0 {
+		panic(fmt.Sprintf("n must be positive, but got %d", n))
+	}
+
+	// This can be done quickly when n is a power of 2.
+	if n&(n-1) == 0 {
+		return int64(rg.Uint64()) & (n - 1)
+	}
+
+	// The naive approach would be to return rg.Int63()%n, but we need the
+	// random number to be fair. It shouldn't be biased towards certain
+	// results, but simple modular math can be very biased. For example, if
+	// n is 40% of the maximum int64, then the output values of rg.Int63
+	// map to return values as follows:
+	//
+	//  - The first 40% of values map to themselves.
+	//  - The second 40% map to themselves - maximum int64.
+	//  - The remaining 20% map to themselves - 2 * (maximum int64),
+	//    i.e. the first half of possible output values.
+	//
+	// And thus 60% of results map to the first half of possible output
+	// values, and 40% map to the second half. Oops!
+	//
+	// We use the same trick as Go to deal with this: shave off the last
+	// segment (the 20% in our example) to make the RNG more fair.
+	//
+	// In the worst case, n is just over half of maximum int64, meaning
+	// that the upper half of rg.Int63 return values are bad. So each call
+	// to rg.Int63 has, at worst, a 50% chance of needing a retry.
+	maximum := int64((1 << 63) - 1 - (1<<63)%uint64(n))
+	ret := rg.Int63()
+	for ret > maximum {
+		ret = rg.Int63()
+	}
+	return ret % n
+}
+
+// Int63 is analogous to the standard library's math/rand.Int63.
+func (rg *RNG) Int63() int64 {
+	return ((1 << 63) - 1) & int64(rg.Uint64())
+}
+
+// Uint64 is analogous to the standard library's math/rand.Uint64.
+func (rg *RNG) Uint64() uint64 {
+	var data [8]byte
+	if _, err := rg.Reader.Read(data[:]); err != nil {
+		panic(fmt.Sprintf("Read() failed: %v", err))
+	}
+	return binary.NativeEndian.Uint64(data[:])
+}
+
+// Uint32 is analogous to the standard library's math/rand.Uint32.
+func Uint32() uint32 {
+	rng := RNG{Reader: Reader}
+	return rng.Uint32()
+}
+
+// Int63n is analogous to the standard library's math/rand.Int63n.
+func Int63n(n int64) int64 {
+	rng := RNG{Reader: Reader}
+	return rng.Int63n(n)
+}
+
+// Int63 is analogous to the standard library's math/rand.Int63.
+func Int63() int64 {
+	rng := RNG{Reader: Reader}
+	return rng.Int63()
+}
+
+// Uint64 is analogous to the standard library's math/rand.Uint64.
+func Uint64() uint64 {
+	rng := RNG{Reader: Reader}
+	return rng.Uint64()
+}
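
Int63n's comment above explains why a bare modulo would bias the result and why draws above the largest multiple of n are rejected. The same rejection-sampling loop as a standalone sketch, built on crypto/rand instead of the package's Reader:

package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
)

// uint64n draws 8 cryptographically secure random bytes as a uint64.
func uint64n() uint64 {
	var b [8]byte
	if _, err := rand.Read(b[:]); err != nil {
		panic(err)
	}
	return binary.LittleEndian.Uint64(b[:])
}

// int63n mirrors the rejection sampling in Int63n above: values past the
// largest multiple of n are redrawn so the final modulo is unbiased.
func int63n(n int64) int64 {
	maximum := int64((1 << 63) - 1 - (1<<63)%uint64(n))
	ret := int64(uint64n() &^ (1 << 63)) // clear the sign bit, like Int63
	for ret > maximum {
		ret = int64(uint64n() &^ (1 << 63))
	}
	return ret % n
}

func main() {
	fmt.Println(int63n(10)) // uniform in [0, 10)
}
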
diff --git a/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe.go
index 822c0f42..eab682dc 100644
--- a/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe.go
+++ b/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe.go
@@ -68,6 +68,7 @@
 package sleep
 
 import (
+	"context"
 	"sync/atomic"
 	"unsafe"
 
@@ -129,7 +130,7 @@ func (s *Sleeper) saveSharedList() *Waker {
 }
 
 // loadSharedList is invoked by stateify.
-func (s *Sleeper) loadSharedList(w *Waker) {
+func (s *Sleeper) loadSharedList(_ context.Context, w *Waker) {
 	atomic.StorePointer(&s.sharedList, unsafe.Pointer(w))
 }
 
@@ -206,7 +207,7 @@ func (s *Sleeper) nextWaker(block, wakepOrSleep bool) *Waker {
 			// See:runtime2.go in the go runtime package for
 			// the values to pass as the waitReason here.
 			const waitReasonSelect = 9
-			sync.Gopark(commitSleep, unsafe.Pointer(&s.waitingG), sync.WaitReasonSelect, sync.TraceEvGoBlockSelect, 0)
+			sync.Gopark(commitSleep, unsafe.Pointer(&s.waitingG), sync.WaitReasonSelect, sync.TraceBlockSelect, 0)
 		}
 
 		// Pull the shared list out and reverse it in the local
@@ -408,7 +409,7 @@ func (w *Waker) saveS() wakerState {
 }
 
 // loadS is invoked by stateify.
-func (w *Waker) loadS(ws wakerState) {
+func (w *Waker) loadS(_ context.Context, ws wakerState) {
 	if ws.asserted {
 		atomic.StorePointer(&w.s, unsafe.Pointer(&assertedSleeper))
 	} else {
diff --git a/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe_state_autogen.go
index d91ace02..b346c34a 100644
--- a/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe_state_autogen.go
@@ -3,6 +3,8 @@
 package sleep
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -30,13 +32,13 @@ func (s *Sleeper) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &s.allWakers)
 }
 
-func (s *Sleeper) afterLoad() {}
+func (s *Sleeper) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *Sleeper) StateLoad(stateSourceObject state.Source) {
+func (s *Sleeper) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(1, &s.localList)
 	stateSourceObject.Load(2, &s.allWakers)
-	stateSourceObject.LoadValue(0, new(*Waker), func(y any) { s.loadSharedList(y.(*Waker)) })
+	stateSourceObject.LoadValue(0, new(*Waker), func(y any) { s.loadSharedList(ctx, y.(*Waker)) })
 }
 
 func (w *Waker) StateTypeName() string {
@@ -63,13 +65,13 @@ func (w *Waker) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &w.allWakersNext)
 }
 
-func (w *Waker) afterLoad() {}
+func (w *Waker) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (w *Waker) StateLoad(stateSourceObject state.Source) {
+func (w *Waker) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(1, &w.next)
 	stateSourceObject.Load(2, &w.allWakersNext)
-	stateSourceObject.LoadValue(0, new(wakerState), func(y any) { w.loadS(y.(wakerState)) })
+	stateSourceObject.LoadValue(0, new(wakerState), func(y any) { w.loadS(ctx, y.(wakerState)) })
 }
 
 func init() {
diff --git a/vendor/gvisor.dev/gvisor/pkg/state/addr_set.go b/vendor/gvisor.dev/gvisor/pkg/state/addr_set.go
index 10fe0ea5..49b8bd5e 100644
--- a/vendor/gvisor.dev/gvisor/pkg/state/addr_set.go
+++ b/vendor/gvisor.dev/gvisor/pkg/state/addr_set.go
@@ -2,6 +2,7 @@ package state
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 )
 
@@ -56,7 +57,7 @@ const (
 //
 // +stateify savable
 type addrSet struct {
-	root addrnode `state:".(*addrSegmentDataSlices)"`
+	root addrnode `state:".([]addrFlatSegment)"`
 }
 
 // IsEmpty returns true if the set contains no segments.
@@ -228,42 +229,68 @@ func (s *addrSet) UpperBoundGap(max uintptr) addrGapIterator {
 	return seg.PrevGap()
 }
 
-// Add inserts the given segment into the set and returns true. If the new
-// segment can be merged with adjacent segments, Add will do so. If the new
-// segment would overlap an existing segment, Add returns false. If Add
-// succeeds, all existing iterators are invalidated.
-func (s *addrSet) Add(r addrRange, val *objectEncodeState) bool {
-	if r.Length() <= 0 {
-		panic(fmt.Sprintf("invalid segment range %v", r))
+// FirstLargeEnoughGap returns the first gap in the set with at least the given
+// length. If no such gap exists, FirstLargeEnoughGap returns a terminal
+// iterator.
+//
+// Precondition: trackGaps must be 1.
+func (s *addrSet) FirstLargeEnoughGap(minSize uintptr) addrGapIterator {
+	if addrtrackGaps != 1 {
+		panic("set is not tracking gaps")
 	}
-	gap := s.FindGap(r.Start)
-	if !gap.Ok() {
-		return false
+	gap := s.FirstGap()
+	if gap.Range().Length() >= minSize {
+		return gap
 	}
-	if r.End > gap.End() {
-		return false
+	return gap.NextLargeEnoughGap(minSize)
+}
+
+// LastLargeEnoughGap returns the last gap in the set with at least the given
+// length. If no such gap exists, LastLargeEnoughGap returns a terminal
+// iterator.
+//
+// Precondition: trackGaps must be 1.
+func (s *addrSet) LastLargeEnoughGap(minSize uintptr) addrGapIterator {
+	if addrtrackGaps != 1 {
+		panic("set is not tracking gaps")
 	}
-	s.Insert(gap, r, val)
-	return true
+	gap := s.LastGap()
+	if gap.Range().Length() >= minSize {
+		return gap
+	}
+	return gap.PrevLargeEnoughGap(minSize)
 }
 
-// AddWithoutMerging inserts the given segment into the set and returns true.
-// If it would overlap an existing segment, AddWithoutMerging does nothing and
-// returns false. If AddWithoutMerging succeeds, all existing iterators are
-// invalidated.
-func (s *addrSet) AddWithoutMerging(r addrRange, val *objectEncodeState) bool {
-	if r.Length() <= 0 {
-		panic(fmt.Sprintf("invalid segment range %v", r))
+// LowerBoundLargeEnoughGap returns the first gap in the set with at least the
+// given length and whose range contains a key greater than or equal to min. If
+// no such gap exists, LowerBoundLargeEnoughGap returns a terminal iterator.
+//
+// Precondition: trackGaps must be 1.
+func (s *addrSet) LowerBoundLargeEnoughGap(min, minSize uintptr) addrGapIterator {
+	if addrtrackGaps != 1 {
+		panic("set is not tracking gaps")
 	}
-	gap := s.FindGap(r.Start)
-	if !gap.Ok() {
-		return false
+	gap := s.LowerBoundGap(min)
+	if gap.Range().Length() >= minSize {
+		return gap
 	}
-	if r.End > gap.End() {
-		return false
+	return gap.NextLargeEnoughGap(minSize)
+}
+
+// UpperBoundLargeEnoughGap returns the last gap in the set with at least the
+// given length and whose range contains a key less than or equal to max. If no
+// such gap exists, UpperBoundLargeEnoughGap returns a terminal iterator.
+//
+// Precondition: trackGaps must be 1.
+func (s *addrSet) UpperBoundLargeEnoughGap(max, minSize uintptr) addrGapIterator {
+	if addrtrackGaps != 1 {
+		panic("set is not tracking gaps")
+	}
+	gap := s.UpperBoundGap(max)
+	if gap.Range().Length() >= minSize {
+		return gap
 	}
-	s.InsertWithoutMergingUnchecked(gap, r, val)
-	return true
+	return gap.PrevLargeEnoughGap(minSize)
 }
 
 // Insert inserts the given segment into the given gap. If the new segment can
@@ -360,6 +387,107 @@ func (s *addrSet) InsertWithoutMergingUnchecked(gap addrGapIterator, r addrRange
 	return addrIterator{gap.node, gap.index}
 }
 
+// InsertRange inserts the given segment into the set. If the new segment can
+// be merged with adjacent segments, InsertRange will do so. InsertRange
+// returns an iterator to the segment containing the inserted value (which may
+// have been merged with other values). All existing iterators (excluding the
+// returned iterator) are invalidated.
+//
+// If the new segment would overlap an existing segment, or if r is invalid,
+// InsertRange panics.
+//
+// InsertRange searches the set to find the gap to insert into. If the caller
+// already has the appropriate GapIterator, or if the caller needs to do
+// additional work between finding the gap and insertion, use Insert instead.
+func (s *addrSet) InsertRange(r addrRange, val *objectEncodeState) addrIterator {
+	if r.Length() <= 0 {
+		panic(fmt.Sprintf("invalid segment range %v", r))
+	}
+	seg, gap := s.Find(r.Start)
+	if seg.Ok() {
+		panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, seg.Range()))
+	}
+	if gap.End() < r.End {
+		panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, gap.NextSegment().Range()))
+	}
+	return s.Insert(gap, r, val)
+}
+
+// InsertWithoutMergingRange inserts the given segment into the set and returns
+// an iterator to the inserted segment. All existing iterators (excluding the
+// returned iterator) are invalidated.
+//
+// If the new segment would overlap an existing segment, or if r is invalid,
+// InsertWithoutMergingRange panics.
+//
+// InsertWithoutMergingRange searches the set to find the gap to insert into.
+// If the caller already has the appropriate GapIterator, or if the caller
+// needs to do additional work between finding the gap and insertion, use
+// InsertWithoutMerging instead.
+func (s *addrSet) InsertWithoutMergingRange(r addrRange, val *objectEncodeState) addrIterator {
+	if r.Length() <= 0 {
+		panic(fmt.Sprintf("invalid segment range %v", r))
+	}
+	seg, gap := s.Find(r.Start)
+	if seg.Ok() {
+		panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, seg.Range()))
+	}
+	if gap.End() < r.End {
+		panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, gap.NextSegment().Range()))
+	}
+	return s.InsertWithoutMerging(gap, r, val)
+}
+
+// TryInsertRange attempts to insert the given segment into the set. If the new
+// segment can be merged with adjacent segments, TryInsertRange will do so.
+// TryInsertRange returns an iterator to the segment containing the inserted
+// value (which may have been merged with other values). All existing iterators
+// (excluding the returned iterator) are invalidated.
+//
+// If the new segment would overlap an existing segment, TryInsertRange does
+// nothing and returns a terminal iterator.
+//
+// TryInsertRange searches the set to find the gap to insert into. If the
+// caller already has the appropriate GapIterator, or if the caller needs to do
+// additional work between finding the gap and insertion, use Insert instead.
+func (s *addrSet) TryInsertRange(r addrRange, val *objectEncodeState) addrIterator {
+	if r.Length() <= 0 {
+		panic(fmt.Sprintf("invalid segment range %v", r))
+	}
+	seg, gap := s.Find(r.Start)
+	if seg.Ok() {
+		return addrIterator{}
+	}
+	if gap.End() < r.End {
+		return addrIterator{}
+	}
+	return s.Insert(gap, r, val)
+}
+
+// TryInsertWithoutMergingRange attempts to insert the given segment into the
+// set. If successful, it returns an iterator to the inserted segment; all
+// existing iterators (excluding the returned iterator) are invalidated. If the
+// new segment would overlap an existing segment, TryInsertWithoutMergingRange
+// does nothing and returns a terminal iterator.
+//
+// TryInsertWithoutMergingRange searches the set to find the gap to insert
+// into. If the caller already has the appropriate GapIterator, or if the
+// caller needs to do additional work between finding the gap and insertion,
+// use InsertWithoutMerging instead.
+func (s *addrSet) TryInsertWithoutMergingRange(r addrRange, val *objectEncodeState) addrIterator {
+	if r.Length() <= 0 {
+		panic(fmt.Sprintf("invalid segment range %v", r))
+	}
+	seg, gap := s.Find(r.Start)
+	if seg.Ok() {
+		return addrIterator{}
+	}
+	if gap.End() < r.End {
+		return addrIterator{}
+	}
+	return s.InsertWithoutMerging(gap, r, val)
+}
+
 // Remove removes the given segment and returns an iterator to the vacated gap.
 // All existing iterators (including seg, but not including the returned
 // iterator) are invalidated.
@@ -396,6 +524,11 @@ func (s *addrSet) RemoveAll() {
 
 // RemoveRange removes all segments in the given range. An iterator to the
 // newly formed gap is returned, and all existing iterators are invalidated.
+//
+// RemoveRange searches the set to find segments to remove. If the caller
+// already has an iterator to either end of the range of segments to remove, or
+// if the caller needs to do additional work before removing each segment,
+// iterate segments and call Remove in a loop instead.
 func (s *addrSet) RemoveRange(r addrRange) addrGapIterator {
 	seg, gap := s.Find(r.Start)
 	if seg.Ok() {
@@ -403,12 +536,34 @@ func (s *addrSet) RemoveRange(r addrRange) addrGapIterator {
 		gap = s.Remove(seg)
 	}
 	for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
-		seg = s.Isolate(seg, r)
+		seg = s.SplitAfter(seg, r.End)
 		gap = s.Remove(seg)
 	}
 	return gap
 }
 
+// RemoveFullRange is equivalent to RemoveRange, except that if any key in the
+// given range does not correspond to a segment, RemoveFullRange panics.
+func (s *addrSet) RemoveFullRange(r addrRange) addrGapIterator {
+	seg := s.FindSegment(r.Start)
+	if !seg.Ok() {
+		panic(fmt.Sprintf("missing segment at %v", r.Start))
+	}
+	seg = s.SplitBefore(seg, r.Start)
+	for {
+		seg = s.SplitAfter(seg, r.End)
+		end := seg.End()
+		gap := s.Remove(seg)
+		if r.End <= end {
+			return gap
+		}
+		seg = gap.NextSegment()
+		if !seg.Ok() || seg.Start() != end {
+			panic(fmt.Sprintf("missing segment at %v", end))
+		}
+	}
+}
+
 // Merge attempts to merge two neighboring segments. If successful, Merge
 // returns an iterator to the merged segment, and all existing iterators are
 // invalidated. Otherwise, Merge returns a terminal iterator.
@@ -441,7 +596,68 @@ func (s *addrSet) MergeUnchecked(first, second addrIterator) addrIterator {
 	return addrIterator{}
 }
 
-// MergeAll attempts to merge all adjacent segments in the set. All existing
+// MergePrev attempts to merge the given segment with its predecessor if
+// possible, and returns an updated iterator to the extended segment. All
+// existing iterators (including seg, but not including the returned iterator)
+// are invalidated.
+//
+// MergePrev is usually used when mutating segments while iterating them in
+// order of increasing keys, to attempt merging of each mutated segment with
+// its previously-mutated predecessor. In such cases, merging a mutated segment
+// with its unmutated successor would incorrectly cause the latter to be
+// skipped.
+func (s *addrSet) MergePrev(seg addrIterator) addrIterator {
+	if prev := seg.PrevSegment(); prev.Ok() {
+		if mseg := s.MergeUnchecked(prev, seg); mseg.Ok() {
+			seg = mseg
+		}
+	}
+	return seg
+}
+
+// MergeNext attempts to merge the given segment with its successor if
+// possible, and returns an updated iterator to the extended segment. All
+// existing iterators (including seg, but not including the returned iterator)
+// are invalidated.
+//
+// MergeNext is usually used when mutating segments while iterating them in
+// order of decreasing keys, to attempt merging of each mutated segment with
+// its previously-mutated successor. In such cases, merging a mutated segment
+// with its unmutated predecessor would incorrectly cause the latter to be
+// skipped.
+func (s *addrSet) MergeNext(seg addrIterator) addrIterator {
+	if next := seg.NextSegment(); next.Ok() {
+		if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+			seg = mseg
+		}
+	}
+	return seg
+}
+
+// Unisolate attempts to merge the given segment with its predecessor and
+// successor if possible, and returns an updated iterator to the extended
+// segment. All existing iterators (including seg, but not including the
+// returned iterator) are invalidated.
+//
+// Unisolate is usually used in conjunction with Isolate when mutating part of
+// a single segment in a way that may affect its mergeability. For the reasons
+// described by MergePrev and MergeNext, it is usually incorrect to use the
+// return value of Unisolate in a loop variable.
+func (s *addrSet) Unisolate(seg addrIterator) addrIterator {
+	if prev := seg.PrevSegment(); prev.Ok() {
+		if mseg := s.MergeUnchecked(prev, seg); mseg.Ok() {
+			seg = mseg
+		}
+	}
+	if next := seg.NextSegment(); next.Ok() {
+		if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+			seg = mseg
+		}
+	}
+	return seg
+}
+
+// MergeAll merges all mergeable adjacent segments in the set. All existing
 // iterators are invalidated.
 func (s *addrSet) MergeAll() {
 	seg := s.FirstSegment()
@@ -458,15 +674,20 @@ func (s *addrSet) MergeAll() {
 	}
 }
 
-// MergeRange attempts to merge all adjacent segments that contain a key in the
-// specific range. All existing iterators are invalidated.
-func (s *addrSet) MergeRange(r addrRange) {
+// MergeInsideRange attempts to merge all adjacent segments that contain a key
+// in the specific range. All existing iterators are invalidated.
+//
+// MergeInsideRange only makes sense after mutating the set in a way that may
+// change the mergeability of modified segments; callers should prefer to use
+// MergePrev or MergeNext during the mutating loop instead (depending on the
+// direction of iteration), in order to avoid a redundant search.
+func (s *addrSet) MergeInsideRange(r addrRange) {
 	seg := s.LowerBoundSegment(r.Start)
 	if !seg.Ok() {
 		return
 	}
 	next := seg.NextSegment()
-	for next.Ok() && next.Range().Start < r.End {
+	for next.Ok() && next.Start() < r.End {
 		if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
 			seg, next = mseg, mseg.NextSegment()
 		} else {
@@ -475,9 +696,14 @@ func (s *addrSet) MergeRange(r addrRange) {
 	}
 }
 
-// MergeAdjacent attempts to merge the segment containing r.Start with its
+// MergeOutsideRange attempts to merge the segment containing r.Start with its
 // predecessor, and the segment containing r.End-1 with its successor.
-func (s *addrSet) MergeAdjacent(r addrRange) {
+//
+// MergeOutsideRange only makes sense after mutating the set in a way that may
+// change the mergeability of modified segments; callers should prefer to use
+// MergePrev or MergeNext during the mutating loop instead (depending on the
+// direction of iteration), in order to avoid two redundant searches.
+func (s *addrSet) MergeOutsideRange(r addrRange) {
 	first := s.FindSegment(r.Start)
 	if first.Ok() {
 		if prev := first.PrevSegment(); prev.Ok() {
@@ -522,21 +748,58 @@ func (s *addrSet) SplitUnchecked(seg addrIterator, split uintptr) (addrIterator,
 	return seg2.PrevSegment(), seg2
 }
 
-// SplitAt splits the segment straddling split, if one exists. SplitAt returns
-// true if a segment was split and false otherwise. If SplitAt splits a
-// segment, all existing iterators are invalidated.
-func (s *addrSet) SplitAt(split uintptr) bool {
-	if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
-		s.SplitUnchecked(seg, split)
-		return true
+// SplitBefore ensures that the given segment's start is at least start by
+// splitting at start if necessary, and returns an updated iterator to the
+// bounded segment. All existing iterators (including seg, but not including
+// the returned iterator) are invalidated.
+//
+// SplitBefore is usually used when mutating segments in a range. In such cases,
+// when iterating segments in order of increasing keys, the first segment may
+// extend beyond the start of the range to be mutated, and needs to be
+// SplitBefore to ensure that only the part of the segment within the range is
+// mutated. When iterating segments in order of decreasing keys, SplitBefore
+// and SplitAfter exchange roles; i.e. SplitBefore needs to be invoked on each
+// segment, while SplitAfter only needs to be invoked on the first.
+//
+// Preconditions: start < seg.End().
+func (s *addrSet) SplitBefore(seg addrIterator, start uintptr) addrIterator {
+	if seg.Range().CanSplitAt(start) {
+		_, seg = s.SplitUnchecked(seg, start)
 	}
-	return false
+	return seg
 }
 
-// Isolate ensures that the given segment's range does not escape r by
-// splitting at r.Start and r.End if necessary, and returns an updated iterator
-// to the bounded segment. All existing iterators (including seg, but not
-// including the returned iterators) are invalidated.
+// SplitAfter ensures that the given segment's end is at most end by splitting
+// at end if necessary, and returns an updated iterator to the bounded segment.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+//
+// SplitAfter is usually used when mutating segments in a range. In such cases,
+// when iterating segments in order of increasing keys, each iterated segment
+// may extend beyond the end of the range to be mutated, and needs to be
+// SplitAfter to ensure that only the part of the segment within the range is
+// mutated. When iterating segments in order of decreasing keys, SplitBefore
+// and SplitAfter exchange roles; i.e. SplitBefore needs to be invoked on each
+// segment, while SplitAfter only needs to be invoked on the first.
+//
+// Preconditions: seg.Start() < end.
+func (s *addrSet) SplitAfter(seg addrIterator, end uintptr) addrIterator {
+	if seg.Range().CanSplitAt(end) {
+		seg, _ = s.SplitUnchecked(seg, end)
+	}
+	return seg
+}
+
+// Isolate ensures that the given segment's range is a subset of r by splitting
+// at r.Start and r.End if necessary, and returns an updated iterator to the
+// bounded segment. All existing iterators (including seg, but not including
+// the returned iterators) are invalidated.
+//
+// Isolate is usually used when mutating part of a single segment, or when
+// mutating segments in a range where the first segment is not necessarily
+// split, making use of SplitBefore/SplitAfter complex.
+//
+// Preconditions: seg.Range().Overlaps(r).
 func (s *addrSet) Isolate(seg addrIterator, r addrRange) addrIterator {
 	if seg.Range().CanSplitAt(r.Start) {
 		_, seg = s.SplitUnchecked(seg, r.Start)
@@ -547,32 +810,118 @@ func (s *addrSet) Isolate(seg addrIterator, r addrRange) addrIterator {
 	return seg
 }
 
-// ApplyContiguous applies a function to a contiguous range of segments,
-// splitting if necessary. The function is applied until the first gap is
-// encountered, at which point the gap is returned. If the function is applied
-// across the entire range, a terminal gap is returned. All existing iterators
-// are invalidated.
+// LowerBoundSegmentSplitBefore combines LowerBoundSegment and SplitBefore.
 //
-// N.B. The Iterator must not be invalidated by the function.
-func (s *addrSet) ApplyContiguous(r addrRange, fn func(seg addrIterator)) addrGapIterator {
-	seg, gap := s.Find(r.Start)
-	if !seg.Ok() {
-		return gap
+// LowerBoundSegmentSplitBefore is usually used when mutating segments in a
+// range while iterating them in order of increasing keys. In such cases,
+// LowerBoundSegmentSplitBefore provides an iterator to the first segment to be
+// mutated, suitable as the initial value for a loop variable.
+func (s *addrSet) LowerBoundSegmentSplitBefore(min uintptr) addrIterator {
+	seg := s.LowerBoundSegment(min)
+	if seg.Ok() {
+		seg = s.SplitBefore(seg, min)
+	}
+	return seg
+}
+
+// UpperBoundSegmentSplitAfter combines UpperBoundSegment and SplitAfter.
+//
+// UpperBoundSegmentSplitAfter is usually used when mutating segments in a
+// range while iterating them in order of decreasing keys. In such cases,
+// UpperBoundSegmentSplitAfter provides an iterator to the first segment to be
+// mutated, suitable as the initial value for a loop variable.
+func (s *addrSet) UpperBoundSegmentSplitAfter(max uintptr) addrIterator {
+	seg := s.UpperBoundSegment(max)
+	if seg.Ok() {
+		seg = s.SplitAfter(seg, max)
+	}
+	return seg
+}
+
+// VisitRange applies the function f to all segments intersecting the range r,
+// in order of ascending keys. Segments will not be split, so f may be called
+// on segments lying partially outside r. Non-empty gaps between segments are
+// skipped. If a call to f returns false, VisitRange stops iteration
+// immediately.
+//
+// N.B. f must not invalidate iterators into s.
+func (s *addrSet) VisitRange(r addrRange, f func(seg addrIterator) bool) {
+	for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+		if !f(seg) {
+			return
+		}
 	}
+}
+
+// VisitFullRange is equivalent to VisitRange, except that if any key in r that
+// is visited before f returns false does not correspond to a segment,
+// VisitFullRange panics.
+func (s *addrSet) VisitFullRange(r addrRange, f func(seg addrIterator) bool) {
+	pos := r.Start
+	seg := s.FindSegment(r.Start)
 	for {
-		seg = s.Isolate(seg, r)
-		fn(seg)
-		if seg.End() >= r.End {
-			return addrGapIterator{}
+		if !seg.Ok() {
+			panic(fmt.Sprintf("missing segment at %v", pos))
 		}
-		gap = seg.NextGap()
-		if !gap.IsEmpty() {
-			return gap
+		if !f(seg) {
+			return
 		}
-		seg = gap.NextSegment()
-		if !seg.Ok() {
+		pos = seg.End()
+		if r.End <= pos {
+			return
+		}
+		seg, _ = seg.NextNonEmpty()
+	}
+}
 
-			return addrGapIterator{}
+// MutateRange applies the function f to all segments intersecting the range r,
+// in order of ascending keys. Segments that lie partially outside r are split
+// before f is called, such that f only observes segments entirely within r.
+// Iterated segments are merged again after f is called. Non-empty gaps between
+// segments are skipped. If a call to f returns false, MutateRange stops
+// iteration immediately.
+//
+// MutateRange invalidates all existing iterators.
+//
+// N.B. f must not invalidate iterators into s.
+func (s *addrSet) MutateRange(r addrRange, f func(seg addrIterator) bool) {
+	seg := s.LowerBoundSegmentSplitBefore(r.Start)
+	for seg.Ok() && seg.Start() < r.End {
+		seg = s.SplitAfter(seg, r.End)
+		cont := f(seg)
+		seg = s.MergePrev(seg)
+		if !cont {
+			s.MergeNext(seg)
+			return
+		}
+		seg = seg.NextSegment()
+	}
+	if seg.Ok() {
+		s.MergePrev(seg)
+	}
+}
+
+// MutateFullRange is equivalent to MutateRange, except that if any key in r
+// that is visited before f returns false does not correspond to a segment,
+// MutateFullRange panics.
+func (s *addrSet) MutateFullRange(r addrRange, f func(seg addrIterator) bool) {
+	seg := s.FindSegment(r.Start)
+	if !seg.Ok() {
+		panic(fmt.Sprintf("missing segment at %v", r.Start))
+	}
+	seg = s.SplitBefore(seg, r.Start)
+	for {
+		seg = s.SplitAfter(seg, r.End)
+		cont := f(seg)
+		end := seg.End()
+		seg = s.MergePrev(seg)
+		if !cont || r.End <= end {
+			s.MergeNext(seg)
+			return
+		}
+		seg = seg.NextSegment()
+		if !seg.Ok() || seg.Start() != end {
+			panic(fmt.Sprintf("missing segment at %v", end))
 		}
 	}
 }
@@ -1243,11 +1592,10 @@ func (seg addrIterator) NextGap() addrGapIterator {
 // Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
 // non-terminal.
 func (seg addrIterator) PrevNonEmpty() (addrIterator, addrGapIterator) {
-	gap := seg.PrevGap()
-	if gap.Range().Length() != 0 {
-		return addrIterator{}, gap
+	if prev := seg.PrevSegment(); prev.Ok() && prev.End() == seg.Start() {
+		return prev, addrGapIterator{}
 	}
-	return gap.PrevSegment(), addrGapIterator{}
+	return addrIterator{}, seg.PrevGap()
 }
 
 // NextNonEmpty returns the iterated segment's successor if it is adjacent, or
@@ -1256,11 +1604,10 @@ func (seg addrIterator) PrevNonEmpty() (addrIterator, addrGapIterator) {
 // Otherwise, exactly one of the iterators returned by NextNonEmpty will be
 // non-terminal.
 func (seg addrIterator) NextNonEmpty() (addrIterator, addrGapIterator) {
-	gap := seg.NextGap()
-	if gap.Range().Length() != 0 {
-		return addrIterator{}, gap
+	if next := seg.NextSegment(); next.Ok() && next.Start() == seg.End() {
+		return next, addrGapIterator{}
 	}
-	return gap.NextSegment(), addrGapIterator{}
+	return addrIterator{}, seg.NextGap()
 }
 
 // A GapIterator is conceptually one of:
@@ -1379,35 +1726,36 @@ func (gap addrGapIterator) NextLargeEnoughGap(minSize uintptr) addrGapIterator {
 //
 // Preconditions: gap is NOT the trailing gap of a non-leaf node.
 func (gap addrGapIterator) nextLargeEnoughGapHelper(minSize uintptr) addrGapIterator {
+	for {
 
-	for gap.node != nil &&
-		(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
-		gap.node, gap.index = gap.node.parent, gap.node.parentIndex
-	}
+		for gap.node != nil &&
+			(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+			gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+		}
 
-	if gap.node == nil {
-		return addrGapIterator{}
-	}
+		if gap.node == nil {
+			return addrGapIterator{}
+		}
 
-	gap.index++
-	for gap.index <= gap.node.nrSegments {
-		if gap.node.hasChildren {
-			if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
-				return largeEnoughGap
-			}
-		} else {
-			if gap.Range().Length() >= minSize {
-				return gap
+		gap.index++
+		for gap.index <= gap.node.nrSegments {
+			if gap.node.hasChildren {
+				if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+					return largeEnoughGap
+				}
+			} else {
+				if gap.Range().Length() >= minSize {
+					return gap
+				}
 			}
+			gap.index++
 		}
-		gap.index++
-	}
-	gap.node, gap.index = gap.node.parent, gap.node.parentIndex
-	if gap.node != nil && gap.index == gap.node.nrSegments {
-
 		gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+		if gap.node != nil && gap.index == gap.node.nrSegments {
+
+			gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+		}
 	}
-	return gap.nextLargeEnoughGapHelper(minSize)
 }
 
 // PrevLargeEnoughGap returns the iterated gap's first prev gap with larger or
@@ -1433,35 +1781,36 @@ func (gap addrGapIterator) PrevLargeEnoughGap(minSize uintptr) addrGapIterator {
 //
 // Preconditions: gap is NOT the first gap of a non-leaf node.
 func (gap addrGapIterator) prevLargeEnoughGapHelper(minSize uintptr) addrGapIterator {
+	for {
 
-	for gap.node != nil &&
-		(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
-		gap.node, gap.index = gap.node.parent, gap.node.parentIndex
-	}
+		for gap.node != nil &&
+			(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+			gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+		}
 
-	if gap.node == nil {
-		return addrGapIterator{}
-	}
+		if gap.node == nil {
+			return addrGapIterator{}
+		}
 
-	gap.index--
-	for gap.index >= 0 {
-		if gap.node.hasChildren {
-			if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
-				return largeEnoughGap
-			}
-		} else {
-			if gap.Range().Length() >= minSize {
-				return gap
+		gap.index--
+		for gap.index >= 0 {
+			if gap.node.hasChildren {
+				if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+					return largeEnoughGap
+				}
+			} else {
+				if gap.Range().Length() >= minSize {
+					return gap
+				}
 			}
+			gap.index--
 		}
-		gap.index--
-	}
-	gap.node, gap.index = gap.node.parent, gap.node.parentIndex
-	if gap.node != nil && gap.index == 0 {
-
 		gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+		if gap.node != nil && gap.index == 0 {
+
+			gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+		}
 	}
-	return gap.prevLargeEnoughGapHelper(minSize)
 }
 
 // segmentBeforePosition returns the predecessor segment of the position given
@@ -1545,50 +1894,49 @@ func (n *addrnode) writeDebugString(buf *bytes.Buffer, prefix string) {
 	}
 }
 
-// SegmentDataSlices represents segments from a set as slices of start, end, and
-// values. SegmentDataSlices is primarily used as an intermediate representation
-// for save/restore and the layout here is optimized for that.
+// FlatSegment represents a segment as a single object. FlatSegment is used as
+// an intermediate representation for save/restore and tests.
 //
 // +stateify savable
-type addrSegmentDataSlices struct {
-	Start  []uintptr
-	End    []uintptr
-	Values []*objectEncodeState
+type addrFlatSegment struct {
+	Start uintptr
+	End   uintptr
+	Value *objectEncodeState
 }
 
-// ExportSortedSlices returns a copy of all segments in the given set, in
-// ascending key order.
-func (s *addrSet) ExportSortedSlices() *addrSegmentDataSlices {
-	var sds addrSegmentDataSlices
+// ExportSlice returns a copy of all segments in the given set, in ascending
+// key order.
+func (s *addrSet) ExportSlice() []addrFlatSegment {
+	var fs []addrFlatSegment
 	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
-		sds.Start = append(sds.Start, seg.Start())
-		sds.End = append(sds.End, seg.End())
-		sds.Values = append(sds.Values, seg.Value())
+		fs = append(fs, addrFlatSegment{
+			Start: seg.Start(),
+			End:   seg.End(),
+			Value: seg.Value(),
+		})
 	}
-	sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
-	sds.End = sds.End[:len(sds.End):len(sds.End)]
-	sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
-	return &sds
+	return fs
 }
 
-// ImportSortedSlices initializes the given set from the given slice.
+// ImportSlice initializes the given set from the given slice.
 //
 // Preconditions:
 //   - s must be empty.
-//   - sds must represent a valid set (the segments in sds must have valid
+//   - fs must represent a valid set (the segments in fs must have valid
 //     lengths that do not overlap).
-//   - The segments in sds must be sorted in ascending key order.
-func (s *addrSet) ImportSortedSlices(sds *addrSegmentDataSlices) error {
+//   - The segments in fs must be sorted in ascending key order.
+func (s *addrSet) ImportSlice(fs []addrFlatSegment) error {
 	if !s.IsEmpty() {
 		return fmt.Errorf("cannot import into non-empty set %v", s)
 	}
 	gap := s.FirstGap()
-	for i := range sds.Start {
-		r := addrRange{sds.Start[i], sds.End[i]}
+	for i := range fs {
+		f := &fs[i]
+		r := addrRange{f.Start, f.End}
 		if !gap.Range().IsSupersetOf(r) {
-			return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+			return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: %v => %v", r, f.Value)
 		}
-		gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+		gap = s.InsertWithoutMerging(gap, r, f.Value).NextGap()
 	}
 	return nil
 }
@@ -1632,12 +1980,15 @@ func (s *addrSet) countSegments() (segments int) {
 	}
 	return segments
 }
-func (s *addrSet) saveRoot() *addrSegmentDataSlices {
-	return s.ExportSortedSlices()
+func (s *addrSet) saveRoot() []addrFlatSegment {
+	fs := s.ExportSlice()
+
+	fs = fs[:len(fs):len(fs)]
+	return fs
 }
 
-func (s *addrSet) loadRoot(sds *addrSegmentDataSlices) {
-	if err := s.ImportSortedSlices(sds); err != nil {
+func (s *addrSet) loadRoot(_ context.Context, fs []addrFlatSegment) {
+	if err := s.ImportSlice(fs); err != nil {
 		panic(err)
 	}
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/state/decode.go b/vendor/gvisor.dev/gvisor/pkg/state/decode.go
index 777d7768..79beeb01 100644
--- a/vendor/gvisor.dev/gvisor/pkg/state/decode.go
+++ b/vendor/gvisor.dev/gvisor/pkg/state/decode.go
@@ -18,6 +18,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"io"
 	"math"
 	"reflect"
 
@@ -142,7 +143,7 @@ type decodeState struct {
 	ctx context.Context
 
 	// r is the input stream.
-	r wire.Reader
+	r io.Reader
 
 	// types is the type database.
 	types typeDecodeDatabase
@@ -244,7 +245,7 @@ func (ds *decodeState) waitObject(ods *objectDecodeState, encoded wire.Object, c
 		// See decodeObject; we need to wait for the array (if non-nil).
 		ds.wait(ods, objectID(sv.Ref.Root), callback)
 	} else if iv, ok := encoded.(*wire.Interface); ok {
-		// It's an interface (wait recurisvely).
+		// It's an interface (wait recursively).
 		ds.waitObject(ods, iv.Value, callback)
 	} else if callback != nil {
 		// Nothing to wait for: execute the callback immediately.
@@ -385,7 +386,7 @@ func (ds *decodeState) decodeStruct(ods *objectDecodeState, obj reflect.Value, e
 	if sl, ok := obj.Addr().Interface().(SaverLoader); ok {
 		// Note: may be a registered empty struct which does not
 		// implement the saver/loader interfaces.
-		sl.StateLoad(Source{internal: od})
+		sl.StateLoad(ds.ctx, Source{internal: od})
 	}
 }
 
@@ -567,7 +568,7 @@ func (ds *decodeState) decodeObject(ods *objectDecodeState, obj reflect.Value, e
 	case *wire.Interface:
 		ds.decodeInterface(ods, obj, x)
 	default:
-		// Shoud not happen, not propagated as an error.
+		// Should not happen, not propagated as an error.
 		Failf("unknown object %#v for %q", encoded, obj.Type().Name())
 	}
 }
@@ -691,7 +692,7 @@ func (ds *decodeState) Load(obj reflect.Value) {
 			}
 		}
 	}); err != nil {
-		Failf("error executing callbacks for %#v: %w", ods.obj.Interface(), err)
+		Failf("error executing callbacks: %w\nfor object %#v", err, ods.obj.Interface())
 	}
 
 	// Check if we have any remaining dependency cycles. If there are any
@@ -717,7 +718,7 @@ func (ds *decodeState) Load(obj reflect.Value) {
 // Each object written to the statefile is prefixed with a header. See
 // WriteHeader for more information; these functions are exported to allow
 // non-state writes to the file to play nice with debugging tools.
-func ReadHeader(r wire.Reader) (length uint64, object bool, err error) {
+func ReadHeader(r io.Reader) (length uint64, object bool, err error) {
 	// Read the header.
 	err = safely(func() {
 		length = wire.LoadUint(r)
diff --git a/vendor/gvisor.dev/gvisor/pkg/state/encode.go b/vendor/gvisor.dev/gvisor/pkg/state/encode.go
index 9f15c3c2..ee1ce324 100644
--- a/vendor/gvisor.dev/gvisor/pkg/state/encode.go
+++ b/vendor/gvisor.dev/gvisor/pkg/state/encode.go
@@ -16,6 +16,7 @@ package state
 
 import (
 	"context"
+	"io"
 	"reflect"
 	"sort"
 
@@ -31,7 +32,7 @@ type objectEncodeState struct {
 
 	// obj is the object value. Note that this may be replaced if we
 	// encounter an object that contains this object. When this happens (in
-	// resolve), we will update existing references approprately, below,
+	// resolve), we will update existing references appropriately, below,
 	// and defer a re-encoding of the object.
 	obj reflect.Value
 
@@ -61,7 +62,7 @@ type encodeState struct {
 	ctx context.Context
 
 	// w is the output stream.
-	w wire.Writer
+	w io.Writer
 
 	// types is the type database.
 	types typeEncodeDatabase
@@ -417,7 +418,7 @@ func traverse(rootType, targetType reflect.Type, rootAddr, targetAddr uintptr) [
 		Failf("no field in root type %v contains target type %v", rootType, targetType)
 
 	case reflect.Array:
-		// Since arrays have homogenous types, all elements have the
+		// Since arrays have homogeneous types, all elements have the
 		// same size and we can compute where the target lives. This
 		// does not matter for the purpose of typing, but matters for
 		// the purpose of computing the address of the given index.
@@ -432,7 +433,7 @@ func traverse(rootType, targetType reflect.Type, rootAddr, targetAddr uintptr) [
 
 	default:
 		// For any other type, there's no possibility of aliasing so if
-		// the types didn't match earlier then we have an addresss
+		// the types didn't match earlier then we have an address
 		// collision which shouldn't be possible at this point.
 		Failf("traverse failed for root type %v and target type %v", rootType, targetType)
 	}
@@ -771,7 +772,7 @@ func (es *encodeState) Save(obj reflect.Value) {
 		}
 	}); err != nil {
 		// Include the object in the error message.
-		Failf("encoding error at object %#v: %w", oes.obj.Interface(), err)
+		Failf("encoding error: %w\nfor object %#v", err, oes.obj.Interface())
 	}
 
 	// Check that we have objects to serialize.
@@ -824,7 +825,7 @@ const objectFlag uint64 = 1 << 63
 // order to generate statefiles that play nicely with debugging tools, raw
 // writes should be prefixed with a header with object set to false and the
 // appropriate length. This will allow tools to skip these regions.
-func WriteHeader(w wire.Writer, length uint64, object bool) error {
+func WriteHeader(w io.Writer, length uint64, object bool) error {
 	// Sanity check the length.
 	if length&objectFlag != 0 {
 		Failf("impossibly huge length: %d", length)
diff --git a/vendor/gvisor.dev/gvisor/pkg/state/state.go b/vendor/gvisor.dev/gvisor/pkg/state/state.go
index 4a9e6ead..13519228 100644
--- a/vendor/gvisor.dev/gvisor/pkg/state/state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/state/state.go
@@ -50,6 +50,7 @@ package state
 import (
 	"context"
 	"fmt"
+	"io"
 	"reflect"
 	"runtime"
 
@@ -87,7 +88,7 @@ func (e *ErrState) Unwrap() error {
 }
 
 // Save saves the given object state.
-func Save(ctx context.Context, w wire.Writer, rootPtr any) (Stats, error) {
+func Save(ctx context.Context, w io.Writer, rootPtr any) (Stats, error) {
 	// Create the encoding state.
 	es := encodeState{
 		ctx:            ctx,
@@ -106,7 +107,7 @@ func Save(ctx context.Context, w wire.Writer, rootPtr any) (Stats, error) {
 }
 
 // Load loads a checkpoint.
-func Load(ctx context.Context, r wire.Reader, rootPtr any) (Stats, error) {
+func Load(ctx context.Context, r io.Reader, rootPtr any) (Stats, error) {
 	// Create the decoding state.
 	ds := decodeState{
 		ctx:      ctx,
@@ -211,7 +212,7 @@ type SaverLoader interface {
 	StateSave(Sink)
 
 	// StateLoad loads the state of the object.
-	StateLoad(Source)
+	StateLoad(context.Context, Source)
 }
 
 // Source is used for Type.StateLoad.
diff --git a/vendor/gvisor.dev/gvisor/pkg/state/types.go b/vendor/gvisor.dev/gvisor/pkg/state/types.go
index b96423e1..d3e1cbfe 100644
--- a/vendor/gvisor.dev/gvisor/pkg/state/types.go
+++ b/vendor/gvisor.dev/gvisor/pkg/state/types.go
@@ -198,7 +198,7 @@ var singleFieldOrder = []int{0}
 // Lookup looks up or registers the given object.
 //
 // First, the typeID is searched to see if this has already been appropriately
-// reconciled. If no, then a reconcilation will take place that may result in a
+// reconciled. If no, then a reconciliation will take place that may result in a
 // field ordering. If a nil reconciledTypeEntry is returned from this method,
 // then the object does not support the Type interface.
 //
diff --git a/vendor/gvisor.dev/gvisor/pkg/state/wire/wire.go b/vendor/gvisor.dev/gvisor/pkg/state/wire/wire.go
index 93dee674..db2547ae 100644
--- a/vendor/gvisor.dev/gvisor/pkg/state/wire/wire.go
+++ b/vendor/gvisor.dev/gvisor/pkg/state/wire/wire.go
@@ -33,18 +33,11 @@ import (
 	"math"
 
 	"gvisor.dev/gvisor/pkg/gohacks"
+	"gvisor.dev/gvisor/pkg/sync"
 )
 
-// Reader is the required reader interface.
-type Reader interface {
-	io.Reader
-	ReadByte() (byte, error)
-}
-
-// Writer is the required writer interface.
-type Writer interface {
-	io.Writer
-	WriteByte(byte) error
+var oneByteArrayPool = sync.Pool{
+	New: func() any { return &[1]byte{} },
 }
 
 // readFull is a utility. The equivalent is not needed for Write, but the API
@@ -65,25 +58,25 @@ type Object interface {
 	// save saves the given object.
 	//
 	// Panic is used for error control flow.
-	save(Writer)
+	save(io.Writer)
 
 	// load loads a new object of the given type.
 	//
 	// Panic is used for error control flow.
-	load(Reader) Object
+	load(io.Reader) Object
 }
 
 // Bool is a boolean.
 type Bool bool
 
 // loadBool loads an object of type Bool.
-func loadBool(r Reader) Bool {
+func loadBool(r io.Reader) Bool {
 	b := loadUint(r)
 	return Bool(b == 1)
 }
 
 // save implements Object.save.
-func (b Bool) save(w Writer) {
+func (b Bool) save(w io.Writer) {
 	var v Uint
 	if b {
 		v = 1
@@ -94,7 +87,7 @@ func (b Bool) save(w Writer) {
 }
 
 // load implements Object.load.
-func (Bool) load(r Reader) Object { return loadBool(r) }
+func (Bool) load(r io.Reader) Object { return loadBool(r) }
 
 // Int is a signed integer.
 //
@@ -102,7 +95,7 @@ func (Bool) load(r Reader) Object { return loadBool(r) }
 type Int int64
 
 // loadInt loads an object of type Int.
-func loadInt(r Reader) Int {
+func loadInt(r io.Reader) Int {
 	u := loadUint(r)
 	x := Int(u >> 1)
 	if u&1 != 0 {
@@ -112,7 +105,7 @@ func loadInt(r Reader) Int {
 }
 
 // save implements Object.save.
-func (i Int) save(w Writer) {
+func (i Int) save(w io.Writer) {
 	u := Uint(i) << 1
 	if i < 0 {
 		u = ^u
@@ -121,22 +114,29 @@ func (i Int) save(w Writer) {
 }
 
 // load implements Object.load.
-func (Int) load(r Reader) Object { return loadInt(r) }
+func (Int) load(r io.Reader) Object { return loadInt(r) }
 
 // Uint is an unsigned integer.
 type Uint uint64
 
+func readByte(r io.Reader) byte {
+	p := oneByteArrayPool.Get().(*[1]byte)
+	defer oneByteArrayPool.Put(p)
+	n, err := r.Read(p[:])
+	if n != 1 {
+		panic(err)
+	}
+	return p[0]
+}
+
 // loadUint loads an object of type Uint.
-func loadUint(r Reader) Uint {
+func loadUint(r io.Reader) Uint {
 	var (
 		u Uint
 		s uint
 	)
 	for i := 0; i <= 9; i++ {
-		b, err := r.ReadByte()
-		if err != nil {
-			panic(err)
-		}
+		b := readByte(r)
 		if b < 0x80 {
 			if i == 9 && b > 1 {
 				panic("overflow")
@@ -150,70 +150,76 @@ func loadUint(r Reader) Uint {
 	panic("unreachable")
 }
 
+func writeByte(w io.Writer, b byte) {
+	p := oneByteArrayPool.Get().(*[1]byte)
+	defer oneByteArrayPool.Put(p)
+	p[0] = b
+	n, err := w.Write(p[:])
+	if n != 1 {
+		panic(err)
+	}
+}
+
 // save implements Object.save.
-func (u Uint) save(w Writer) {
+func (u Uint) save(w io.Writer) {
 	for u >= 0x80 {
-		if err := w.WriteByte(byte(u) | 0x80); err != nil {
-			panic(err)
-		}
+		writeByte(w, byte(u)|0x80)
 		u >>= 7
 	}
-	if err := w.WriteByte(byte(u)); err != nil {
-		panic(err)
-	}
+	writeByte(w, byte(u))
 }
 
 // load implements Object.load.
-func (Uint) load(r Reader) Object { return loadUint(r) }
+func (Uint) load(r io.Reader) Object { return loadUint(r) }
 
 // Float32 is a 32-bit floating point number.
 type Float32 float32
 
 // loadFloat32 loads an object of type Float32.
-func loadFloat32(r Reader) Float32 {
+func loadFloat32(r io.Reader) Float32 {
 	n := loadUint(r)
 	return Float32(math.Float32frombits(uint32(n)))
 }
 
 // save implements Object.save.
-func (f Float32) save(w Writer) {
+func (f Float32) save(w io.Writer) {
 	n := Uint(math.Float32bits(float32(f)))
 	n.save(w)
 }
 
 // load implements Object.load.
-func (Float32) load(r Reader) Object { return loadFloat32(r) }
+func (Float32) load(r io.Reader) Object { return loadFloat32(r) }
 
 // Float64 is a 64-bit floating point number.
 type Float64 float64
 
 // loadFloat64 loads an object of type Float64.
-func loadFloat64(r Reader) Float64 {
+func loadFloat64(r io.Reader) Float64 {
 	n := loadUint(r)
 	return Float64(math.Float64frombits(uint64(n)))
 }
 
 // save implements Object.save.
-func (f Float64) save(w Writer) {
+func (f Float64) save(w io.Writer) {
 	n := Uint(math.Float64bits(float64(f)))
 	n.save(w)
 }
 
 // load implements Object.load.
-func (Float64) load(r Reader) Object { return loadFloat64(r) }
+func (Float64) load(r io.Reader) Object { return loadFloat64(r) }
 
 // Complex64 is a 64-bit complex number.
 type Complex64 complex128
 
 // loadComplex64 loads an object of type Complex64.
-func loadComplex64(r Reader) Complex64 {
+func loadComplex64(r io.Reader) Complex64 {
 	re := loadFloat32(r)
 	im := loadFloat32(r)
 	return Complex64(complex(float32(re), float32(im)))
 }
 
 // save implements Object.save.
-func (c *Complex64) save(w Writer) {
+func (c *Complex64) save(w io.Writer) {
 	re := Float32(real(*c))
 	im := Float32(imag(*c))
 	re.save(w)
@@ -221,7 +227,7 @@ func (c *Complex64) save(w Writer) {
 }
 
 // load implements Object.load.
-func (*Complex64) load(r Reader) Object {
+func (*Complex64) load(r io.Reader) Object {
 	c := loadComplex64(r)
 	return &c
 }
@@ -230,14 +236,14 @@ func (*Complex64) load(r Reader) Object {
 type Complex128 complex128
 
 // loadComplex128 loads an object of type Complex128.
-func loadComplex128(r Reader) Complex128 {
+func loadComplex128(r io.Reader) Complex128 {
 	re := loadFloat64(r)
 	im := loadFloat64(r)
 	return Complex128(complex(float64(re), float64(im)))
 }
 
 // save implements Object.save.
-func (c *Complex128) save(w Writer) {
+func (c *Complex128) save(w io.Writer) {
 	re := Float64(real(*c))
 	im := Float64(imag(*c))
 	re.save(w)
@@ -245,7 +251,7 @@ func (c *Complex128) save(w Writer) {
 }
 
 // load implements Object.load.
-func (*Complex128) load(r Reader) Object {
+func (*Complex128) load(r io.Reader) Object {
 	c := loadComplex128(r)
 	return &c
 }
@@ -254,7 +260,7 @@ func (*Complex128) load(r Reader) Object {
 type String string
 
 // loadString loads an object of type String.
-func loadString(r Reader) String {
+func loadString(r io.Reader) String {
 	l := loadUint(r)
 	p := make([]byte, l)
 	readFull(r, p)
@@ -262,7 +268,7 @@ func loadString(r Reader) String {
 }
 
 // save implements Object.save.
-func (s *String) save(w Writer) {
+func (s *String) save(w io.Writer) {
 	l := Uint(len(*s))
 	l.save(w)
 	p := gohacks.ImmutableBytesFromString(string(*s))
@@ -273,7 +279,7 @@ func (s *String) save(w Writer) {
 }
 
 // load implements Object.load.
-func (*String) load(r Reader) Object {
+func (*String) load(r io.Reader) Object {
 	s := loadString(r)
 	return &s
 }
@@ -309,7 +315,7 @@ type Ref struct {
 }
 
 // loadRef loads an object of type Ref (abstract).
-func loadRef(r Reader) Ref {
+func loadRef(r io.Reader) Ref {
 	ref := Ref{
 		Root: loadUint(r),
 	}
@@ -337,7 +343,7 @@ func loadRef(r Reader) Ref {
 }
 
 // save implements Object.save.
-func (r *Ref) save(w Writer) {
+func (r *Ref) save(w io.Writer) {
 	r.Root.save(w)
 	l := Uint(len(r.Dots))
 	l.save(w)
@@ -366,7 +372,7 @@ func (r *Ref) save(w Writer) {
 }
 
 // load implements Object.load.
-func (*Ref) load(r Reader) Object {
+func (*Ref) load(r io.Reader) Object {
 	ref := loadRef(r)
 	return &ref
 }
@@ -375,15 +381,15 @@ func (*Ref) load(r Reader) Object {
 type Nil struct{}
 
 // loadNil loads an object of type Nil.
-func loadNil(r Reader) Nil {
+func loadNil(r io.Reader) Nil {
 	return Nil{}
 }
 
 // save implements Object.save.
-func (Nil) save(w Writer) {}
+func (Nil) save(w io.Writer) {}
 
 // load implements Object.load.
-func (Nil) load(r Reader) Object { return loadNil(r) }
+func (Nil) load(r io.Reader) Object { return loadNil(r) }
 
 // Slice is a slice value.
 type Slice struct {
@@ -393,7 +399,7 @@ type Slice struct {
 }
 
 // loadSlice loads an object of type Slice.
-func loadSlice(r Reader) Slice {
+func loadSlice(r io.Reader) Slice {
 	return Slice{
 		Length:   loadUint(r),
 		Capacity: loadUint(r),
@@ -402,14 +408,14 @@ func loadSlice(r Reader) Slice {
 }
 
 // save implements Object.save.
-func (s *Slice) save(w Writer) {
+func (s *Slice) save(w io.Writer) {
 	s.Length.save(w)
 	s.Capacity.save(w)
 	s.Ref.save(w)
 }
 
 // load implements Object.load.
-func (*Slice) load(r Reader) Object {
+func (*Slice) load(r io.Reader) Object {
 	s := loadSlice(r)
 	return &s
 }
@@ -420,7 +426,7 @@ type Array struct {
 }
 
 // loadArray loads an object of type Array.
-func loadArray(r Reader) Array {
+func loadArray(r io.Reader) Array {
 	l := loadUint(r)
 	if l == 0 {
 		// Note that there isn't a single object available to encode
@@ -442,7 +448,7 @@ func loadArray(r Reader) Array {
 }
 
 // save implements Object.save.
-func (a *Array) save(w Writer) {
+func (a *Array) save(w io.Writer) {
 	l := Uint(len(a.Contents))
 	l.save(w)
 	if l == 0 {
@@ -457,7 +463,7 @@ func (a *Array) save(w Writer) {
 }
 
 // load implements Object.load.
-func (*Array) load(r Reader) Object {
+func (*Array) load(r io.Reader) Object {
 	a := loadArray(r)
 	return &a
 }
@@ -469,7 +475,7 @@ type Map struct {
 }
 
 // loadMap loads an object of type Map.
-func loadMap(r Reader) Map {
+func loadMap(r io.Reader) Map {
 	l := loadUint(r)
 	if l == 0 {
 		// See LoadArray.
@@ -493,7 +499,7 @@ func loadMap(r Reader) Map {
 }
 
 // save implements Object.save.
-func (m *Map) save(w Writer) {
+func (m *Map) save(w io.Writer) {
 	l := Uint(len(m.Keys))
 	if int(l) != len(m.Values) {
 		panic(fmt.Sprintf("mismatched keys (%d) Aand values (%d)", len(m.Keys), len(m.Values)))
@@ -513,7 +519,7 @@ func (m *Map) save(w Writer) {
 }
 
 // load implements Object.load.
-func (*Map) load(r Reader) Object {
+func (*Map) load(r io.Reader) Object {
 	m := loadMap(r)
 	return &m
 }
@@ -578,7 +584,7 @@ const (
 )
 
 // loadTypeSpec loads TypeSpec values.
-func loadTypeSpec(r Reader) TypeSpec {
+func loadTypeSpec(r io.Reader) TypeSpec {
 	switch hdr := loadUint(r); hdr {
 	case typeSpecTypeID:
 		return TypeID(loadUint(r))
@@ -609,7 +615,7 @@ func loadTypeSpec(r Reader) TypeSpec {
 }
 
 // saveTypeSpec saves TypeSpec values.
-func saveTypeSpec(w Writer, t TypeSpec) {
+func saveTypeSpec(w io.Writer, t TypeSpec) {
 	switch x := t.(type) {
 	case TypeID:
 		typeSpecTypeID.save(w)
@@ -643,7 +649,7 @@ type Interface struct {
 }
 
 // loadInterface loads an object of type Interface.
-func loadInterface(r Reader) Interface {
+func loadInterface(r io.Reader) Interface {
 	return Interface{
 		Type:  loadTypeSpec(r),
 		Value: Load(r),
@@ -651,13 +657,13 @@ func loadInterface(r Reader) Interface {
 }
 
 // save implements Object.save.
-func (i *Interface) save(w Writer) {
+func (i *Interface) save(w io.Writer) {
 	saveTypeSpec(w, i.Type)
 	Save(w, i.Value)
 }
 
 // load implements Object.load.
-func (*Interface) load(r Reader) Object {
+func (*Interface) load(r io.Reader) Object {
 	i := loadInterface(r)
 	return &i
 }
@@ -669,7 +675,7 @@ type Type struct {
 }
 
 // loadType loads an object of type Type.
-func loadType(r Reader) Type {
+func loadType(r io.Reader) Type {
 	name := string(loadString(r))
 	l := loadUint(r)
 	fields := make([]string, l)
@@ -683,7 +689,7 @@ func loadType(r Reader) Type {
 }
 
 // save implements Object.save.
-func (t *Type) save(w Writer) {
+func (t *Type) save(w io.Writer) {
 	s := String(t.Name)
 	s.save(w)
 	l := Uint(len(t.Fields))
@@ -695,7 +701,7 @@ func (t *Type) save(w Writer) {
 }
 
 // load implements Object.load.
-func (*Type) load(r Reader) Object {
+func (*Type) load(r io.Reader) Object {
 	t := loadType(r)
 	return &t
 }
@@ -704,7 +710,7 @@ func (*Type) load(r Reader) Object {
 type multipleObjects []Object
 
 // loadMultipleObjects loads a series of objects.
-func loadMultipleObjects(r Reader) multipleObjects {
+func loadMultipleObjects(r io.Reader) multipleObjects {
 	l := loadUint(r)
 	m := make(multipleObjects, l)
 	for i := 0; i < int(l); i++ {
@@ -714,7 +720,7 @@ func loadMultipleObjects(r Reader) multipleObjects {
 }
 
 // save implements Object.save.
-func (m *multipleObjects) save(w Writer) {
+func (m *multipleObjects) save(w io.Writer) {
 	l := Uint(len(*m))
 	l.save(w)
 	for i := 0; i < int(l); i++ {
@@ -723,7 +729,7 @@ func (m *multipleObjects) save(w Writer) {
 }
 
 // load implements Object.load.
-func (*multipleObjects) load(r Reader) Object {
+func (*multipleObjects) load(r io.Reader) Object {
 	m := loadMultipleObjects(r)
 	return &m
 }
@@ -732,13 +738,13 @@ func (*multipleObjects) load(r Reader) Object {
 type noObjects struct{}
 
 // loadNoObjects loads a sentinel.
-func loadNoObjects(r Reader) noObjects { return noObjects{} }
+func loadNoObjects(r io.Reader) noObjects { return noObjects{} }
 
 // save implements Object.save.
-func (noObjects) save(w Writer) {}
+func (noObjects) save(w io.Writer) {}
 
 // load implements Object.load.
-func (noObjects) load(r Reader) Object { return loadNoObjects(r) }
+func (noObjects) load(r io.Reader) Object { return loadNoObjects(r) }
 
 // Struct is a basic composite value.
 type Struct struct {
@@ -793,7 +799,7 @@ func (s *Struct) Fields() int {
 }
 
 // loadStruct loads an object of type Struct.
-func loadStruct(r Reader) Struct {
+func loadStruct(r io.Reader) Struct {
 	return Struct{
 		TypeID: TypeID(loadUint(r)),
 		fields: Load(r),
@@ -804,13 +810,13 @@ func loadStruct(r Reader) Struct {
 //
 // Precondition: Alloc must have been called, and the fields all filled in
 // appropriately. See Alloc and Add for more details.
-func (s *Struct) save(w Writer) {
+func (s *Struct) save(w io.Writer) {
 	Uint(s.TypeID).save(w)
 	Save(w, s.fields)
 }
 
 // load implements Object.load.
-func (*Struct) load(r Reader) Object {
+func (*Struct) load(r io.Reader) Object {
 	s := loadStruct(r)
 	return &s
 }
@@ -845,7 +851,7 @@ const (
 // +checkescape all
 //
 // N.B. This function will panic on error.
-func Save(w Writer, obj Object) {
+func Save(w io.Writer, obj Object) {
 	switch x := obj.(type) {
 	case Bool:
 		typeBool.save(w)
@@ -911,7 +917,7 @@ func Save(w Writer, obj Object) {
 // +checkescape all
 //
 // N.B. This function will panic on error.
-func Load(r Reader) Object {
+func Load(r io.Reader) Object {
 	switch hdr := loadUint(r); hdr {
 	case typeBool:
 		return loadBool(r)
@@ -958,13 +964,13 @@ func Load(r Reader) Object {
 // LoadUint loads a single unsigned integer.
 //
 // N.B. This function will panic on error.
-func LoadUint(r Reader) uint64 {
+func LoadUint(r io.Reader) uint64 {
 	return uint64(loadUint(r))
 }
 
 // SaveUint saves a single unsigned integer.
 //
 // N.B. This function will panic on error.
-func SaveUint(w Writer, v uint64) {
+func SaveUint(w io.Writer, v uint64) {
 	Uint(v).save(w)
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/gate_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/gate_unsafe.go
index 1f7a0330..0f3b58dc 100644
--- a/vendor/gvisor.dev/gvisor/pkg/sync/gate_unsafe.go
+++ b/vendor/gvisor.dev/gvisor/pkg/sync/gate_unsafe.go
@@ -140,8 +140,8 @@ func (g *Gate) Close() {
 		// The last call to Leave arrived while we were setting up closingG.
 		return
 	}
-	// WaitReasonSemacquire/TraceEvGoBlockSync are consistent with WaitGroup.
-	gopark(gateCommit, gohacks.Noescape(unsafe.Pointer(&g.closingG)), WaitReasonSemacquire, TraceEvGoBlockSync, 0)
+	// WaitReasonSemacquire/TraceBlockSync are consistent with WaitGroup.
+	gopark(gateCommit, gohacks.Noescape(unsafe.Pointer(&g.closingG)), WaitReasonSemacquire, TraceBlockSync, 0)
 }
 
 //go:norace
diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go
index 9a5a47a8..d6eef328 100644
--- a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go
+++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go
@@ -20,10 +20,3 @@ const (
 	WaitReasonChanReceive uint8 = 14 // +checkconst runtime waitReasonChanReceive
 	WaitReasonSemacquire  uint8 = 18 // +checkconst runtime waitReasonSemacquire
 )
-
-// Values for the traceEv argument to gopark, from Go's src/runtime/trace.go.
-const (
-	TraceEvGoBlockRecv   byte = 23 // +checkconst runtime traceEvGoBlockRecv
-	TraceEvGoBlockSelect byte = 24 // +checkconst runtime traceEvGoBlockSelect
-	TraceEvGoBlockSync   byte = 25 // +checkconst runtime traceEvGoBlockSync
-)
diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_exectracer2.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_exectracer2.go
new file mode 100644
index 00000000..58630af2
--- /dev/null
+++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_exectracer2.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sync
+
+// TraceBlockReason constants, from Go's src/runtime/trace2runtime.go.
+const (
+	TraceBlockSelect TraceBlockReason = 3 // +checkconst runtime traceBlockSelect
+	TraceBlockSync   TraceBlockReason = 5 // +checkconst runtime traceBlockSync
+)
diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s
index 85501e54..b6391d2b 100644
--- a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s
+++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s
@@ -15,4 +15,4 @@
 //go:build !amd64
 
 // This file is intentionally left blank. Other arches don't use
-// addrOfSpinning, but we still need an input to the nogo temlate rule.
+// addrOfSpinning, but we still need an input to the nogo template rule.
diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go
index a298bddb..5bc0a92e 100644
--- a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go
+++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go
@@ -29,12 +29,15 @@ func Goyield() {
 // splitting and race context are not available where it is called.
 //
 //go:nosplit
-func Gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceEv byte, traceskip int) {
-	gopark(unlockf, lock, reason, traceEv, traceskip)
+func Gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceReason TraceBlockReason, traceskip int) {
+	gopark(unlockf, lock, reason, traceReason, traceskip)
 }
 
 //go:linkname gopark runtime.gopark
-func gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceEv byte, traceskip int)
+func gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceReason TraceBlockReason, traceskip int)
+
+// TraceBlockReason is equivalent to runtime.traceBlockReason.
+type TraceBlockReason uint8
 
 //go:linkname wakep runtime.wakep
 func wakep()
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go
index 5e91f3dd..9ad06ab2 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go
@@ -179,6 +179,7 @@ func (d *deadlineTimer) setDeadline(cancelCh *chan struct{}, timer **time.Timer,
 	// "A zero value for t means I/O operations will not time out."
 	// - net.Conn.SetDeadline
 	if t.IsZero() {
+		*timer = nil
 		return
 	}
 
@@ -546,17 +547,15 @@ func DialContextTCP(ctx context.Context, s *stack.Stack, addr tcpip.FullAddress,
 type UDPConn struct {
 	deadlineTimer
 
-	stack *stack.Stack
-	ep    tcpip.Endpoint
-	wq    *waiter.Queue
+	ep tcpip.Endpoint
+	wq *waiter.Queue
 }
 
 // NewUDPConn creates a new UDPConn.
-func NewUDPConn(s *stack.Stack, wq *waiter.Queue, ep tcpip.Endpoint) *UDPConn {
+func NewUDPConn(wq *waiter.Queue, ep tcpip.Endpoint) *UDPConn {
 	c := &UDPConn{
-		stack: s,
-		ep:    ep,
-		wq:    wq,
+		ep: ep,
+		wq: wq,
 	}
 	c.deadlineTimer.init()
 	return c
@@ -586,7 +585,7 @@ func DialUDP(s *stack.Stack, laddr, raddr *tcpip.FullAddress, network tcpip.Netw
 		}
 	}
 
-	c := NewUDPConn(s, &wq, ep)
+	c := NewUDPConn(&wq, ep)
 
 	if raddr != nil {
 		if err := c.ep.Connect(*raddr); err != nil {
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go
index 78cc9fdd..0df3d885 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go
@@ -32,6 +32,8 @@ type Error interface {
 	fmt.Stringer
 }
 
+const maxErrno = 134
+
 // LINT.IfChange
 
 // ErrAborted indicates the operation was aborted.
@@ -274,6 +276,19 @@ func (*ErrDuplicateNICID) IgnoreStats() bool {
 }
 func (*ErrDuplicateNICID) String() string { return "duplicate nic id" }
 
+// ErrInvalidNICID indicates the operation used an invalid NIC ID.
+//
+// +stateify savable
+type ErrInvalidNICID struct{}
+
+func (*ErrInvalidNICID) isError() {}
+
+// IgnoreStats implements Error.
+func (*ErrInvalidNICID) IgnoreStats() bool {
+	return false
+}
+func (*ErrInvalidNICID) String() string { return "invalid nic id" }
+
 // ErrInvalidEndpointState indicates the endpoint is in an invalid state.
 //
 // +stateify savable
@@ -589,7 +604,7 @@ func (*ErrMissingRequiredFields) isError() {}
 func (*ErrMissingRequiredFields) IgnoreStats() bool {
 	return true
 }
-func (*ErrMissingRequiredFields) String() string { return "mising required fields" }
+func (*ErrMissingRequiredFields) String() string { return "missing required fields" }
 
 // ErrMulticastInputCannotBeOutput indicates that an input interface matches an
 // output interface in the same multicast route.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/errors_linux.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/errors_linux.go
new file mode 100644
index 00000000..0073568b
--- /dev/null
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/errors_linux.go
@@ -0,0 +1,74 @@
+// Copyright 2024 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package tcpip
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// TranslateErrno translates an errno from the syscall package into a
+// tcpip Error.
+//
+// Valid, but unrecognized errnos will be translated to
+// *ErrInvalidEndpointState (EINVAL). This includes the "zero" value.
+func TranslateErrno(e unix.Errno) Error {
+	switch e {
+	case unix.EEXIST:
+		return &ErrDuplicateAddress{}
+	case unix.ENETUNREACH:
+		return &ErrHostUnreachable{}
+	case unix.EINVAL:
+		return &ErrInvalidEndpointState{}
+	case unix.EALREADY:
+		return &ErrAlreadyConnecting{}
+	case unix.EISCONN:
+		return &ErrAlreadyConnected{}
+	case unix.EADDRINUSE:
+		return &ErrPortInUse{}
+	case unix.EADDRNOTAVAIL:
+		return &ErrBadLocalAddress{}
+	case unix.EPIPE:
+		return &ErrClosedForSend{}
+	case unix.EWOULDBLOCK:
+		return &ErrWouldBlock{}
+	case unix.ECONNREFUSED:
+		return &ErrConnectionRefused{}
+	case unix.ETIMEDOUT:
+		return &ErrTimeout{}
+	case unix.EINPROGRESS:
+		return &ErrConnectStarted{}
+	case unix.EDESTADDRREQ:
+		return &ErrDestinationRequired{}
+	case unix.ENOTSUP:
+		return &ErrNotSupported{}
+	case unix.ENOTTY:
+		return &ErrQueueSizeNotSupported{}
+	case unix.ENOTCONN:
+		return &ErrNotConnected{}
+	case unix.ECONNRESET:
+		return &ErrConnectionReset{}
+	case unix.ECONNABORTED:
+		return &ErrConnectionAborted{}
+	case unix.EMSGSIZE:
+		return &ErrMessageTooLong{}
+	case unix.ENOBUFS:
+		return &ErrNoBufferSpace{}
+	default:
+		return &ErrInvalidEndpointState{}
+	}
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go
index 1cdda6b9..060b4a86 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go
@@ -57,6 +57,9 @@ func checksumUpdate2ByteAlignedUint16(xsum, old, new uint16) uint16 {
 	//        checksum C, the new checksum C' is:
 	//
 	//                C' = C + (-m) + m' = C + (m' - m)
+	if old == new {
+		return xsum
+	}
 	return checksum.Combine(xsum, checksum.Combine(new, ^old))
 }
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go
index 11230ff4..d4575730 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go
@@ -46,6 +46,9 @@ const (
 	// EthernetMinimumSize is the minimum size of a valid ethernet frame.
 	EthernetMinimumSize = 14
 
+	// EthernetMaximumSize is the maximum size of a valid ethernet frame.
+	EthernetMaximumSize = 18
+
 	// EthernetAddressSize is the size, in bytes, of an ethernet address.
 	EthernetAddressSize = 6
 
@@ -82,7 +85,7 @@ const (
 	// capture all traffic.
 	EthernetProtocolAll tcpip.NetworkProtocolNumber = 0x0003
 
-	// EthernetProtocolPUP is the PARC Universial Packet protocol ethertype.
+	// EthernetProtocolPUP is the PARC Universal Packet protocol ethertype.
 	EthernetProtocolPUP tcpip.NetworkProtocolNumber = 0x0200
 )
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/header_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/header_state_autogen.go
index d9f84677..743b11b6 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/header_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/header_state_autogen.go
@@ -3,6 +3,8 @@
 package header
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -36,10 +38,10 @@ func (t *TCPSynOptions) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(6, &t.Flags)
 }
 
-func (t *TCPSynOptions) afterLoad() {}
+func (t *TCPSynOptions) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPSynOptions) StateLoad(stateSourceObject state.Source) {
+func (t *TCPSynOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.MSS)
 	stateSourceObject.Load(1, &t.WS)
 	stateSourceObject.Load(2, &t.TS)
@@ -69,10 +71,10 @@ func (r *SACKBlock) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &r.End)
 }
 
-func (r *SACKBlock) afterLoad() {}
+func (r *SACKBlock) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *SACKBlock) StateLoad(stateSourceObject state.Source) {
+func (r *SACKBlock) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.Start)
 	stateSourceObject.Load(1, &r.End)
 }
@@ -101,10 +103,10 @@ func (t *TCPOptions) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(3, &t.SACKBlocks)
 }
 
-func (t *TCPOptions) afterLoad() {}
+func (t *TCPOptions) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPOptions) StateLoad(stateSourceObject state.Source) {
+func (t *TCPOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.TS)
 	stateSourceObject.Load(1, &t.TSVal)
 	stateSourceObject.Load(2, &t.TSEcr)
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go
index 4e75ac40..ea1bfcd5 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go
@@ -53,7 +53,7 @@ const (
 	ICMPv6EchoMinimumSize = 8
 
 	// ICMPv6ErrorHeaderSize is the size of an ICMP error packet header,
-	// as per RFC 4443, Apendix A, item 4 and the errata.
+	// as per RFC 4443, Appendix A, item 4 and the errata.
 	//   ... all ICMP error messages shall have exactly
 	//   32 bits of type-specific data, so that receivers can reliably find
 	//   the embedded invoking packet even when they don't recognize the
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go
index 523441e8..fb6d86a3 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go
@@ -378,7 +378,7 @@ func (r IGMPv3ReportGroupAddressRecord) RecordType() IGMPv3ReportRecordType {
 	return IGMPv3ReportRecordType(r[igmpv3ReportGroupAddressRecordTypeOffset])
 }
 
-// AuxDataLen returns the length of the auxillary data in this record.
+// AuxDataLen returns the length of the auxiliary data in this record.
 func (r IGMPv3ReportGroupAddressRecord) AuxDataLen() int {
 	return int(r[igmpv3ReportGroupAddressRecordAuxDataLenOffset]) * igmpv3ReportGroupAddressRecordAuxDataLenUnits
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go
index d98c1c42..d6801199 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go
@@ -178,6 +178,9 @@ var (
 
 	// IPv4AllRoutersGroup is a multicast address for all routers.
 	IPv4AllRoutersGroup = tcpip.AddrFrom4([4]byte{0xe0, 0x00, 0x00, 0x02})
+
+	// IPv4Loopback is the loopback IPv4 address.
+	IPv4Loopback = tcpip.AddrFrom4([4]byte{0x7f, 0x00, 0x00, 0x01})
 )
 
 // Flags that may be set in an IPv4 packet.
@@ -346,6 +349,18 @@ func (b IPv4) DestinationAddress() tcpip.Address {
 	return tcpip.AddrFrom4([4]byte(b[dstAddr : dstAddr+IPv4AddressSize]))
 }
 
+// SourceAddressSlice returns the "source address" field of the IPv4 header as a
+// byte slice.
+func (b IPv4) SourceAddressSlice() []byte {
+	return []byte(b[srcAddr : srcAddr+IPv4AddressSize])
+}
+
+// DestinationAddressSlice returns the "destination address" field of the IPv4
+// header as a byte slice.
+func (b IPv4) DestinationAddressSlice() []byte {
+	return []byte(b[dstAddr : dstAddr+IPv4AddressSize])
+}
+
 // SetSourceAddressWithChecksumUpdate implements ChecksummableNetwork.
 func (b IPv4) SetSourceAddressWithChecksumUpdate(new tcpip.Address) {
 	b.SetChecksum(^checksumUpdate2ByteAlignedAddress(^b.Checksum(), b.SourceAddress(), new))
@@ -559,7 +574,7 @@ func IsV4LoopbackAddress(addr tcpip.Address) bool {
 
 // ========================= Options ==========================
 
-// An IPv4OptionType can hold the valuse for the Type in an IPv4 option.
+// An IPv4OptionType can hold the value for the Type in an IPv4 option.
 type IPv4OptionType byte
 
 // These constants are needed to identify individual options in the option list.
@@ -1137,9 +1152,7 @@ func (s IPv4OptionsSerializer) Serialize(b []byte) uint8 {
 	//  header ends on a 32 bit boundary. The padding is zero.
 	padded := padIPv4OptionsLength(total)
 	b = b[:padded-total]
-	for i := range b {
-		b[i] = 0
-	}
+	clear(b)
 	return padded
 }
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6.go
index ed30f77b..4260095c 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6.go
@@ -225,6 +225,18 @@ func (b IPv6) DestinationAddress() tcpip.Address {
 	return tcpip.AddrFrom16([16]byte(b[v6DstAddr:][:IPv6AddressSize]))
 }
 
+// SourceAddressSlice returns the "source address" field of the ipv6 header as a
+// byte slice.
+func (b IPv6) SourceAddressSlice() []byte {
+	return []byte(b[v6SrcAddr:][:IPv6AddressSize])
+}
+
+// DestinationAddressSlice returns the "destination address" field of the ipv6
+// header as a byte slice.
+func (b IPv6) DestinationAddressSlice() []byte {
+	return []byte(b[v6DstAddr:][:IPv6AddressSize])
+}
+
 // Checksum implements Network.Checksum. Given that IPv6 doesn't have a
 // checksum, it just returns 0.
 func (IPv6) Checksum() uint16 {
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go
index 2577c900..7f75b82b 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go
@@ -110,7 +110,7 @@ const (
 
 	// IPv6FragmentExtHdrFragmentOffsetBytesPerUnit is the unit size of a Fragment
 	// extension header's Fragment Offset field. That is, given a Fragment Offset
-	// of 2, the extension header is indiciating that the fragment's payload
+	// of 2, the extension header is indicating that the fragment's payload
 	// starts at the 16th byte in the reassembled packet.
 	IPv6FragmentExtHdrFragmentOffsetBytesPerUnit = 8
 )
@@ -130,9 +130,7 @@ func padIPv6Option(b []byte) {
 		b[ipv6ExtHdrOptionTypeOffset] = uint8(ipv6Pad1ExtHdrOptionIdentifier)
 	default: // Pad with PadN.
 		s := b[ipv6ExtHdrOptionPayloadOffset:]
-		for i := range s {
-			s[i] = 0
-		}
+		clear(s)
 		b[ipv6ExtHdrOptionTypeOffset] = uint8(ipv6PadNExtHdrOptionIdentifier)
 		b[ipv6ExtHdrOptionLengthOffset] = uint8(len(s))
 	}
@@ -317,7 +315,7 @@ func (*IPv6UnknownExtHdrOption) isIPv6ExtHdrOption() {}
 //
 // The return is of the format (option, done, error). done will be true when
 // Next is unable to return anything because the iterator has reached the end of
-// the options data, or an error occured.
+// the options data, or an error occurred.
 func (i *IPv6OptionsExtHdrOptionsIterator) Next() (IPv6ExtHdrOption, bool, error) {
 	for {
 		i.optionOffset = i.nextOptionOffset
@@ -462,7 +460,7 @@ func (b IPv6FragmentExtHdr) More() bool {
 // ID returns the Identification field.
 //
 // This value is used to uniquely identify the packet, between a
-// souce and destination.
+// source and destination.
 func (b IPv6FragmentExtHdr) ID() uint32 {
 	return binary.BigEndian.Uint32(b[ipv6FragmentExtHdrIdentificationOffset:])
 }
@@ -568,7 +566,7 @@ func (i *IPv6PayloadIterator) AsRawHeader(consume bool) IPv6RawPayloadHeader {
 //
 // The return is of the format (header, done, error). done will be true when
 // Next is unable to return anything because the iterator has reached the end of
-// the payload, or an error occured.
+// the payload, or an error occurred.
 func (i *IPv6PayloadIterator) Next() (IPv6PayloadHeader, bool, error) {
 	i.headerOffset = i.nextOffset
 	i.parseOffset = 0
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go
index 0c33f579..3d1fbd19 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go
@@ -422,7 +422,7 @@ func (r MLDv2ReportMulticastAddressRecord) RecordType() MLDv2ReportRecordType {
 	return MLDv2ReportRecordType(r[mldv2ReportMulticastAddressRecordTypeOffset])
 }
 
-// AuxDataLen returns the length of the auxillary data in this record.
+// AuxDataLen returns the length of the auxiliary data in this record.
 func (r MLDv2ReportMulticastAddressRecord) AuxDataLen() int {
 	return int(r[mldv2ReportMulticastAddressRecordAuxDataLenOffset]) * mldv2ReportMulticastAddressRecordAuxDataLenUnits
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_advert.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_advert.go
index beb7fff4..7af42405 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_advert.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_advert.go
@@ -104,7 +104,7 @@ func (b NDPNeighborAdvert) SetOverrideFlag(f bool) {
 	}
 }
 
-// Options returns an NDPOptions of the the options body.
+// Options returns an NDPOptions of the options body.
 func (b NDPNeighborAdvert) Options() NDPOptions {
 	return NDPOptions(b[ndpNAOptionsOffset:])
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_solicit.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_solicit.go
index f39e0620..d571f91f 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_solicit.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_solicit.go
@@ -46,7 +46,7 @@ func (b NDPNeighborSolicit) SetTargetAddress(addr tcpip.Address) {
 	copy(b[ndpNSTargetAddessOffset:][:IPv6AddressSize], addr.AsSlice())
 }
 
-// Options returns an NDPOptions of the the options body.
+// Options returns an NDPOptions of the options body.
 func (b NDPNeighborSolicit) Options() NDPOptions {
 	return NDPOptions(b[ndpNSOptionsOffset:])
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go
index 1dc8111d..5fbae169 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go
@@ -63,7 +63,7 @@ const (
 	// ndpPrefixInformationLength is the expected length, in bytes, of the
 	// body of an NDP Prefix Information option, as per RFC 4861 section
 	// 4.6.2 which specifies that the Length field is 4. Given this, the
-	// expected length, in bytes, is 30 becuase 4 * lengthByteUnits (8) - 2
+	// expected length, in bytes, is 30 because 4 * lengthByteUnits (8) - 2
 	// (Type & Length) = 30.
 	ndpPrefixInformationLength = 30
 
@@ -173,7 +173,7 @@ var (
 )
 
 // Next returns the next element in the backing NDPOptions, or true if we are
-// done, or false if an error occured.
+// done, or false if an error occurred.
 //
 // The return can be read as option, done, error. Note, option should only be
 // used if done is false and error is nil.
@@ -339,8 +339,8 @@ func (b NDPOptions) Serialize(s NDPOptionsSerializer) int {
 		used := o.serializeInto(b[2:])
 
 		// Zero out remaining (padding) bytes, if any exists.
-		for i := used + 2; i < l; i++ {
-			b[i] = 0
+		if used+2 < l {
+			clear(b[used+2 : l])
 		}
 
 		b = b[l:]
@@ -566,9 +566,7 @@ func (o NDPPrefixInformation) serializeInto(b []byte) int {
 
 	// Zero out the Reserved2 field.
 	reserved2 := b[ndpPrefixInformationReserved2Offset:][:ndpPrefixInformationReserved2Length]
-	for i := range reserved2 {
-		reserved2[i] = 0
-	}
+	clear(reserved2)
 
 	return used
 }
@@ -687,9 +685,7 @@ func (o NDPRecursiveDNSServer) serializeInto(b []byte) int {
 	used := copy(b, o)
 
 	// Zero out the reserved bytes that are before the Lifetime field.
-	for i := 0; i < ndpRecursiveDNSServerLifetimeOffset; i++ {
-		b[i] = 0
-	}
+	clear(b[0:ndpRecursiveDNSServerLifetimeOffset])
 
 	return used
 }
@@ -782,9 +778,7 @@ func (o NDPDNSSearchList) serializeInto(b []byte) int {
 	used := copy(b, o)
 
 	// Zero out the reserved bytes that are before the Lifetime field.
-	for i := 0; i < ndpDNSSearchListLifetimeOffset; i++ {
-		b[i] = 0
-	}
+	clear(b[0:ndpDNSSearchListLifetimeOffset])
 
 	return used
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_router_advert.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_router_advert.go
index ef22b66f..e2456c00 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_router_advert.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_router_advert.go
@@ -198,7 +198,7 @@ func (b NDPRouterAdvert) RetransTimer() time.Duration {
 	return time.Millisecond * time.Duration(binary.BigEndian.Uint32(b[ndpRARetransTimerOffset:]))
 }
 
-// Options returns an NDPOptions of the the options body.
+// Options returns an NDPOptions of the options body.
 func (b NDPRouterAdvert) Options() NDPOptions {
 	return NDPOptions(b[ndpRAOptionsOffset:])
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_router_solicit.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_router_solicit.go
index 9e67ba95..5ca2e5cf 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_router_solicit.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_router_solicit.go
@@ -30,7 +30,7 @@ const (
 	ndpRSOptionsOffset = 4
 )
 
-// Options returns an NDPOptions of the the options body.
+// Options returns an NDPOptions of the options body.
 func (b NDPRouterSolicit) Options() NDPOptions {
 	return NDPOptions(b[ndpRSOptionsOffset:])
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go
index 33a85fdb..adcfd77c 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go
@@ -27,7 +27,7 @@ import (
 // pkt.Data.
 //
 // Returns true if the header was successfully parsed.
-func ARP(pkt stack.PacketBufferPtr) bool {
+func ARP(pkt *stack.PacketBuffer) bool {
 	_, ok := pkt.NetworkHeader().Consume(header.ARPSize)
 	if ok {
 		pkt.NetworkProtocolNumber = header.ARPProtocolNumber
@@ -39,7 +39,7 @@ func ARP(pkt stack.PacketBufferPtr) bool {
 // header with the IPv4 header.
 //
 // Returns true if the header was successfully parsed.
-func IPv4(pkt stack.PacketBufferPtr) bool {
+func IPv4(pkt *stack.PacketBuffer) bool {
 	hdr, ok := pkt.Data().PullUp(header.IPv4MinimumSize)
 	if !ok {
 		return false
@@ -71,7 +71,7 @@ func IPv4(pkt stack.PacketBufferPtr) bool {
 
 // IPv6 parses an IPv6 packet found in pkt.Data and populates pkt's network
 // header with the IPv6 header.
-func IPv6(pkt stack.PacketBufferPtr) (proto tcpip.TransportProtocolNumber, fragID uint32, fragOffset uint16, fragMore bool, ok bool) {
+func IPv6(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNumber, fragID uint32, fragOffset uint16, fragMore bool, ok bool) {
 	hdr, ok := pkt.Data().PullUp(header.IPv6MinimumSize)
 	if !ok {
 		return 0, 0, 0, false, false
@@ -157,7 +157,7 @@ traverseExtensions:
 // header with the UDP header.
 //
 // Returns true if the header was successfully parsed.
-func UDP(pkt stack.PacketBufferPtr) bool {
+func UDP(pkt *stack.PacketBuffer) bool {
 	_, ok := pkt.TransportHeader().Consume(header.UDPMinimumSize)
 	pkt.TransportProtocolNumber = header.UDPProtocolNumber
 	return ok
@@ -167,7 +167,7 @@ func UDP(pkt stack.PacketBufferPtr) bool {
 // header with the TCP header.
 //
 // Returns true if the header was successfully parsed.
-func TCP(pkt stack.PacketBufferPtr) bool {
+func TCP(pkt *stack.PacketBuffer) bool {
 	// TCP header is variable length, peek at it first.
 	hdrLen := header.TCPMinimumSize
 	hdr, ok := pkt.Data().PullUp(hdrLen)
@@ -191,7 +191,7 @@ func TCP(pkt stack.PacketBufferPtr) bool {
 // if present.
 //
 // Returns true if an ICMPv4 header was successfully parsed.
-func ICMPv4(pkt stack.PacketBufferPtr) bool {
+func ICMPv4(pkt *stack.PacketBuffer) bool {
 	if _, ok := pkt.TransportHeader().Consume(header.ICMPv4MinimumSize); ok {
 		pkt.TransportProtocolNumber = header.ICMPv4ProtocolNumber
 		return true
@@ -203,7 +203,7 @@ func ICMPv4(pkt stack.PacketBufferPtr) bool {
 // if present.
 //
 // Returns true if an ICMPv6 header was successfully parsed.
-func ICMPv6(pkt stack.PacketBufferPtr) bool {
+func ICMPv6(pkt *stack.PacketBuffer) bool {
 	hdr, ok := pkt.Data().PullUp(header.ICMPv6MinimumSize)
 	if !ok {
 		return false
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/tcp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/tcp.go
index 2d38928c..fe41e8d4 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/tcp.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/tcp.go
@@ -216,6 +216,15 @@ const (
 	// TCPHeaderMaximumSize is the maximum header size of a TCP packet.
 	TCPHeaderMaximumSize = TCPMinimumSize + TCPOptionsMaximumSize
 
+	// TCPTotalHeaderMaximumSize is the maximum size of headers from all layers in
+	// a TCP packet. It is analogous to MAX_TCP_HEADER in Linux.
+	//
+	// TODO(b/319936470): Investigate why this needs to be at least 140 bytes. In
+	// Linux this value is at least 160, but in theory we should be able to use
+	// 138. In practice anything less than 140 starts to break GSO on gVNIC
+	// hardware.
+	TCPTotalHeaderMaximumSize = 160
+
 	// TCPProtocolNumber is TCP's transport protocol number.
 	TCPProtocolNumber tcpip.TransportProtocolNumber = 6
 
@@ -689,7 +698,7 @@ func Acceptable(segSeq seqnum.Value, segLen seqnum.Size, rcvNxt, rcvAcc seqnum.V
 		return segSeq.InRange(rcvNxt, rcvAcc.Add(1))
 	}
 	// Page 70 of RFC 793 allows packets that can be made "acceptable" by trimming
-	// the payload, so we'll accept any payload that overlaps the receieve window.
+	// the payload, so we'll accept any payload that overlaps the receive window.
 	// segSeq < rcvAcc is more correct according to RFC, however, Linux does it
 	// differently, it uses segSeq <= rcvAcc, we'd want to keep the same behavior
 	// as Linux.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/internal/tcp/tcp_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/internal/tcp/tcp_state_autogen.go
index e973a7bb..9aa457fe 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/internal/tcp/tcp_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/internal/tcp/tcp_state_autogen.go
@@ -3,6 +3,8 @@
 package tcp
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -24,10 +26,10 @@ func (offset *TSOffset) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &offset.milliseconds)
 }
 
-func (offset *TSOffset) afterLoad() {}
+func (offset *TSOffset) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (offset *TSOffset) StateLoad(stateSourceObject state.Source) {
+func (offset *TSOffset) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &offset.milliseconds)
 }
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go
index b20ce900..66c95689 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go
@@ -28,12 +28,14 @@ import (
 // concurrency guards.
 //
 // See the tests in this package for example usage.
+//
+// +stateify savable
 type Endpoint struct {
 	child    stack.LinkEndpoint
 	embedder stack.NetworkDispatcher
 
 	// mu protects dispatcher.
-	mu         sync.RWMutex
+	mu         sync.RWMutex `state:"nosave"`
 	dispatcher stack.NetworkDispatcher
 }
 
@@ -51,7 +53,7 @@ func (e *Endpoint) Init(child stack.LinkEndpoint, embedder stack.NetworkDispatch
 }
 
 // DeliverNetworkPacket implements stack.NetworkDispatcher.
-func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
+func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
 	e.mu.RLock()
 	d := e.dispatcher
 	e.mu.RUnlock()
@@ -61,7 +63,7 @@ func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pk
 }
 
 // DeliverLinkPacket implements stack.NetworkDispatcher.
-func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
+func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
 	e.mu.RLock()
 	d := e.dispatcher
 	e.mu.RUnlock()
@@ -97,6 +99,11 @@ func (e *Endpoint) MTU() uint32 {
 	return e.child.MTU()
 }
 
+// SetMTU implements stack.LinkEndpoint.
+func (e *Endpoint) SetMTU(mtu uint32) {
+	e.child.SetMTU(mtu)
+}
+
 // Capabilities implements stack.LinkEndpoint.
 func (e *Endpoint) Capabilities() stack.LinkEndpointCapabilities {
 	return e.child.Capabilities()
@@ -112,6 +119,13 @@ func (e *Endpoint) LinkAddress() tcpip.LinkAddress {
 	return e.child.LinkAddress()
 }
 
+// SetLinkAddress implements stack.LinkEndpoint.SetLinkAddress.
+func (e *Endpoint) SetLinkAddress(addr tcpip.LinkAddress) {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	e.child.SetLinkAddress(addr)
+}
+
 // WritePackets implements stack.LinkEndpoint.
 func (e *Endpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {
 	return e.child.WritePackets(pkts)
@@ -144,11 +158,21 @@ func (e *Endpoint) ARPHardwareType() header.ARPHardwareType {
 }
 
 // AddHeader implements stack.LinkEndpoint.AddHeader.
-func (e *Endpoint) AddHeader(pkt stack.PacketBufferPtr) {
+func (e *Endpoint) AddHeader(pkt *stack.PacketBuffer) {
 	e.child.AddHeader(pkt)
 }
 
 // ParseHeader implements stack.LinkEndpoint.ParseHeader.
-func (e *Endpoint) ParseHeader(pkt stack.PacketBufferPtr) bool {
+func (e *Endpoint) ParseHeader(pkt *stack.PacketBuffer) bool {
 	return e.child.ParseHeader(pkt)
 }
+
+// Close implements stack.LinkEndpoint.
+func (e *Endpoint) Close() {
+	e.child.Close()
+}
+
+// SetOnCloseAction implements stack.LinkEndpoint.
+func (e *Endpoint) SetOnCloseAction(action func()) {
+	e.child.SetOnCloseAction(action)
+}
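
The nested endpoint now forwards the extra methods that stack.LinkEndpoint exposes in this gvisor release (SetMTU, SetLinkAddress, Close, SetOnCloseAction), and its packet hooks take *stack.PacketBuffer directly in place of the stack.PacketBufferPtr alias used previously. A minimal caller-side sketch using only the signatures visible in this hunk; the package name, helper name, and MTU/address values are illustrative and not part of the patch:

package linkcfg

import (
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// configureLink drives the methods forwarded by nested.Endpoint above.
func configureLink(ep stack.LinkEndpoint) {
	// Adjust the MTU and link address on the wrapped endpoint.
	ep.SetMTU(1500)
	ep.SetLinkAddress(tcpip.LinkAddress("\x02\x00\x00\x00\x00\x01"))
	// Register a callback that runs when the endpoint is closed,
	// e.g. to release backing resources.
	ep.SetOnCloseAction(func() {})
}
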
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested_state_autogen.go
index 9e1b5ca4..f53eb8e1 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested_state_autogen.go
@@ -1,3 +1,44 @@
 // automatically generated by stateify.
 
 package nested
+
+import (
+	"context"
+
+	"gvisor.dev/gvisor/pkg/state"
+)
+
+func (e *Endpoint) StateTypeName() string {
+	return "pkg/tcpip/link/nested.Endpoint"
+}
+
+func (e *Endpoint) StateFields() []string {
+	return []string{
+		"child",
+		"embedder",
+		"dispatcher",
+	}
+}
+
+func (e *Endpoint) beforeSave() {}
+
+// +checklocksignore
+func (e *Endpoint) StateSave(stateSinkObject state.Sink) {
+	e.beforeSave()
+	stateSinkObject.Save(0, &e.child)
+	stateSinkObject.Save(1, &e.embedder)
+	stateSinkObject.Save(2, &e.dispatcher)
+}
+
+func (e *Endpoint) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (e *Endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &e.child)
+	stateSourceObject.Load(1, &e.embedder)
+	stateSourceObject.Load(2, &e.dispatcher)
+}
+
+func init() {
+	state.Register((*Endpoint)(nil))
+}
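
The regenerated stateify boilerplate reflects the new save/restore API: afterLoad and StateLoad now take a context.Context, and each savable type still registers itself with the state package at init time. A hand-written sketch of the same pattern, with an illustrative counter type that is not part of this patch:

package example

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

// counter is an illustrative savable type following the generated pattern.
type counter struct {
	n uint64
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"n"} }

func (c *counter) beforeSave() {}

func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.n)
}

// afterLoad and StateLoad receive a context.Context in this gvisor release.
func (c *counter) afterLoad(context.Context) {}

func (c *counter) StateLoad(ctx context.Context, source state.Source) {
	source.Load(0, &c.n)
}

func init() {
	state.Register((*counter)(nil))
}
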
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/pcap.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/pcap.go
index 648852b2..491957ac 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/pcap.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/pcap.go
@@ -50,7 +50,7 @@ var _ encoding.BinaryMarshaler = (*pcapPacket)(nil)
 
 type pcapPacket struct {
 	timestamp     time.Time
-	packet        stack.PacketBufferPtr
+	packet        *stack.PacketBuffer
 	maxCaptureLen int
 }
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer.go
index c3a18674..9fcff324 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer.go
@@ -44,6 +44,7 @@ var LogPackets atomicbitops.Uint32 = atomicbitops.FromUint32(1)
 // sniffer was created for this flag to have effect.
 var LogPacketsToPCAP atomicbitops.Uint32 = atomicbitops.FromUint32(1)
 
+// +stateify savable
 type endpoint struct {
 	nested.Endpoint
 	writer     io.Writer
@@ -133,14 +134,14 @@ func NewWithWriter(lower stack.LinkEndpoint, writer io.Writer, snapLen uint32) (
 // DeliverNetworkPacket implements the stack.NetworkDispatcher interface. It is
 // called by the link-layer endpoint being wrapped when a packet arrives, and
 // logs the packet before forwarding to the actual dispatcher.
-func (e *endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
+func (e *endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
 	e.dumpPacket(DirectionRecv, protocol, pkt)
 	e.Endpoint.DeliverNetworkPacket(protocol, pkt)
 }
 
-func (e *endpoint) dumpPacket(dir Direction, protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
+func (e *endpoint) dumpPacket(dir Direction, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
 	writer := e.writer
-	if writer == nil && LogPackets.Load() == 1 {
+	if LogPackets.Load() == 1 {
 		LogPacket(e.logPrefix, dir, protocol, pkt)
 	}
 	if writer != nil && LogPacketsToPCAP.Load() == 1 {
@@ -170,7 +171,7 @@ func (e *endpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error)
 }
 
 // LogPacket logs a packet to stdout.
-func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
+func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
 	// Figure out the network layer info.
 	var transProto uint8
 	var src tcpip.Address
@@ -190,15 +191,15 @@ func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumbe
 		panic(fmt.Sprintf("unrecognized direction: %d", dir))
 	}
 
-	pkt = trimmedClone(pkt)
-	defer pkt.DecRef()
+	clone := trimmedClone(pkt)
+	defer clone.DecRef()
 	switch protocol {
 	case header.IPv4ProtocolNumber:
-		if ok := parse.IPv4(pkt); !ok {
+		if ok := parse.IPv4(clone); !ok {
 			return
 		}
 
-		ipv4 := header.IPv4(pkt.NetworkHeader().Slice())
+		ipv4 := header.IPv4(clone.NetworkHeader().Slice())
 		fragmentOffset = ipv4.FragmentOffset()
 		moreFragments = ipv4.Flags()&header.IPv4FlagMoreFragments == header.IPv4FlagMoreFragments
 		src = ipv4.SourceAddress()
@@ -208,12 +209,12 @@ func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumbe
 		id = uint32(ipv4.ID())
 
 	case header.IPv6ProtocolNumber:
-		proto, fragID, fragOffset, fragMore, ok := parse.IPv6(pkt)
+		proto, fragID, fragOffset, fragMore, ok := parse.IPv6(clone)
 		if !ok {
 			return
 		}
 
-		ipv6 := header.IPv6(pkt.NetworkHeader().Slice())
+		ipv6 := header.IPv6(clone.NetworkHeader().Slice())
 		src = ipv6.SourceAddress()
 		dst = ipv6.DestinationAddress()
 		transProto = uint8(proto)
@@ -223,11 +224,11 @@ func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumbe
 		fragmentOffset = fragOffset
 
 	case header.ARPProtocolNumber:
-		if !parse.ARP(pkt) {
+		if !parse.ARP(clone) {
 			return
 		}
 
-		arp := header.ARP(pkt.NetworkHeader().Slice())
+		arp := header.ARP(clone.NetworkHeader().Slice())
 		log.Infof(
 			"%s%s arp %s (%s) -> %s (%s) valid:%t",
 			prefix,
@@ -250,7 +251,7 @@ func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumbe
 	switch tcpip.TransportProtocolNumber(transProto) {
 	case header.ICMPv4ProtocolNumber:
 		transName = "icmp"
-		hdr, ok := pkt.Data().PullUp(header.ICMPv4MinimumSize)
+		hdr, ok := clone.Data().PullUp(header.ICMPv4MinimumSize)
 		if !ok {
 			break
 		}
@@ -287,7 +288,7 @@ func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumbe
 
 	case header.ICMPv6ProtocolNumber:
 		transName = "icmp"
-		hdr, ok := pkt.Data().PullUp(header.ICMPv6MinimumSize)
+		hdr, ok := clone.Data().PullUp(header.ICMPv6MinimumSize)
 		if !ok {
 			break
 		}
@@ -322,11 +323,11 @@ func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumbe
 
 	case header.UDPProtocolNumber:
 		transName = "udp"
-		if ok := parse.UDP(pkt); !ok {
+		if ok := parse.UDP(clone); !ok {
 			break
 		}
 
-		udp := header.UDP(pkt.TransportHeader().Slice())
+		udp := header.UDP(clone.TransportHeader().Slice())
 		if fragmentOffset == 0 {
 			srcPort = udp.SourcePort()
 			dstPort = udp.DestinationPort()
@@ -336,18 +337,18 @@ func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumbe
 
 	case header.TCPProtocolNumber:
 		transName = "tcp"
-		if ok := parse.TCP(pkt); !ok {
+		if ok := parse.TCP(clone); !ok {
 			break
 		}
 
-		tcp := header.TCP(pkt.TransportHeader().Slice())
+		tcp := header.TCP(clone.TransportHeader().Slice())
 		if fragmentOffset == 0 {
 			offset := int(tcp.DataOffset())
 			if offset < header.TCPMinimumSize {
 				details += fmt.Sprintf("invalid packet: tcp data offset too small %d", offset)
 				break
 			}
-			if size := pkt.Data().Size() + len(tcp); offset > size && !moreFragments {
+			if size := clone.Data().Size() + len(tcp); offset > size && !moreFragments {
 				details += fmt.Sprintf("invalid packet: tcp data offset %d larger than tcp packet length %d", offset, size)
 				break
 			}
@@ -380,7 +381,7 @@ func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumbe
 
 // trimmedClone clones the packet buffer to not modify the original. It trims
 // anything before the network header.
-func trimmedClone(pkt stack.PacketBufferPtr) stack.PacketBufferPtr {
+func trimmedClone(pkt *stack.PacketBuffer) *stack.PacketBuffer {
 	// We don't clone the original packet buffer so that the new packet buffer
 	// does not have any of its headers set.
 	//
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer_state_autogen.go
index 8d79defe..169e7b7a 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer_state_autogen.go
@@ -1,3 +1,47 @@
 // automatically generated by stateify.
 
 package sniffer
+
+import (
+	"context"
+
+	"gvisor.dev/gvisor/pkg/state"
+)
+
+func (e *endpoint) StateTypeName() string {
+	return "pkg/tcpip/link/sniffer.endpoint"
+}
+
+func (e *endpoint) StateFields() []string {
+	return []string{
+		"Endpoint",
+		"writer",
+		"maxPCAPLen",
+		"logPrefix",
+	}
+}
+
+func (e *endpoint) beforeSave() {}
+
+// +checklocksignore
+func (e *endpoint) StateSave(stateSinkObject state.Sink) {
+	e.beforeSave()
+	stateSinkObject.Save(0, &e.Endpoint)
+	stateSinkObject.Save(1, &e.writer)
+	stateSinkObject.Save(2, &e.maxPCAPLen)
+	stateSinkObject.Save(3, &e.logPrefix)
+}
+
+func (e *endpoint) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (e *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &e.Endpoint)
+	stateSourceObject.Load(1, &e.writer)
+	stateSourceObject.Load(2, &e.maxPCAPLen)
+	stateSourceObject.Load(3, &e.logPrefix)
+}
+
+func init() {
+	state.Register((*endpoint)(nil))
+}
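
In LogPacket above, the packet is no longer re-parsed in place: a trimmed clone is taken, parsed, and released, leaving the caller's buffer untouched, and dumpPacket now logs whenever LogPackets is enabled even when a PCAP writer is attached. A short in-package sketch of the clone-and-release idiom, using only helpers that appear in sniffer.go; the logSafely name is illustrative:

// logSafely is a reduction of LogPacket: parse a trimmed clone and release
// it, so the caller's packet headers stay unmodified.
func logSafely(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
	clone := trimmedClone(pkt)
	defer clone.DecRef()
	if !parse.IPv4(clone) {
		return
	}
	ipv4 := header.IPv4(clone.NetworkHeader().Slice())
	log.Infof("%s%s %s -> %s", prefix, dir, ipv4.SourceAddress(), ipv4.DestinationAddress())
}
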
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp.go
index 68786e10..e05f1889 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp.go
@@ -45,6 +45,7 @@ var _ ip.DADProtocol = (*endpoint)(nil)
 // the link-layer is via stack.NetworkEndpoint.HandlePacket.
 var _ stack.NetworkEndpoint = (*endpoint)(nil)
 
+// +stateify savable
 type endpoint struct {
 	protocol *protocol
 
@@ -55,7 +56,7 @@ type endpoint struct {
 	stats sharedStats
 
 	// mu protects annotated fields below.
-	mu sync.Mutex
+	mu sync.Mutex `state:"nosave"`
 
 	// +checklocks:mu
 	dad ip.DAD
@@ -133,7 +134,7 @@ func (e *endpoint) MaxHeaderLength() uint16 {
 
 func (*endpoint) Close() {}
 
-func (*endpoint) WritePacket(*stack.Route, stack.NetworkHeaderParams, stack.PacketBufferPtr) tcpip.Error {
+func (*endpoint) WritePacket(*stack.Route, stack.NetworkHeaderParams, *stack.PacketBuffer) tcpip.Error {
 	return &tcpip.ErrNotSupported{}
 }
 
@@ -142,11 +143,11 @@ func (*endpoint) NetworkProtocolNumber() tcpip.NetworkProtocolNumber {
 	return ProtocolNumber
 }
 
-func (*endpoint) WriteHeaderIncludedPacket(*stack.Route, stack.PacketBufferPtr) tcpip.Error {
+func (*endpoint) WriteHeaderIncludedPacket(*stack.Route, *stack.PacketBuffer) tcpip.Error {
 	return &tcpip.ErrNotSupported{}
 }
 
-func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) {
+func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {
 	stats := e.stats.arp
 	stats.packetsReceived.Increment()
 
@@ -257,6 +258,7 @@ func (e *endpoint) Stats() stack.NetworkEndpointStats {
 
 var _ stack.NetworkProtocol = (*protocol)(nil)
 
+// +stateify savable
 type protocol struct {
 	stack   *stack.Stack
 	options Options
@@ -278,7 +280,7 @@ func (p *protocol) NewEndpoint(nic stack.NetworkInterface, _ stack.TransportDisp
 	e.mu.Lock()
 	e.dad.Init(&e.mu, p.options.DADConfigs, ip.DADOptions{
 		Clock:     p.stack.Clock(),
-		SecureRNG: p.stack.SecureRNG(),
+		SecureRNG: p.stack.SecureRNG().Reader,
 		// ARP does not support sending nonce values.
 		NonceSize: 0,
 		Protocol:  e,
@@ -383,11 +385,13 @@ func (*protocol) Close() {}
 func (*protocol) Wait() {}
 
 // Parse implements stack.NetworkProtocol.Parse.
-func (*protocol) Parse(pkt stack.PacketBufferPtr) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) {
+func (*protocol) Parse(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) {
 	return 0, false, parse.ARP(pkt)
 }
 
 // Options holds options to configure a protocol.
+//
+// +stateify savable
 type Options struct {
 	// DADConfigs is the default DAD configurations used by ARP endpoints.
 	DADConfigs stack.DADConfigurations
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp_state_autogen.go
index 5cd8535e..69ace2ea 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp_state_autogen.go
@@ -1,3 +1,219 @@
 // automatically generated by stateify.
 
 package arp
+
+import (
+	"context"
+
+	"gvisor.dev/gvisor/pkg/state"
+)
+
+func (e *endpoint) StateTypeName() string {
+	return "pkg/tcpip/network/arp.endpoint"
+}
+
+func (e *endpoint) StateFields() []string {
+	return []string{
+		"protocol",
+		"enabled",
+		"nic",
+		"stats",
+		"dad",
+	}
+}
+
+func (e *endpoint) beforeSave() {}
+
+// +checklocksignore
+func (e *endpoint) StateSave(stateSinkObject state.Sink) {
+	e.beforeSave()
+	stateSinkObject.Save(0, &e.protocol)
+	stateSinkObject.Save(1, &e.enabled)
+	stateSinkObject.Save(2, &e.nic)
+	stateSinkObject.Save(3, &e.stats)
+	stateSinkObject.Save(4, &e.dad)
+}
+
+func (e *endpoint) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (e *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &e.protocol)
+	stateSourceObject.Load(1, &e.enabled)
+	stateSourceObject.Load(2, &e.nic)
+	stateSourceObject.Load(3, &e.stats)
+	stateSourceObject.Load(4, &e.dad)
+}
+
+func (p *protocol) StateTypeName() string {
+	return "pkg/tcpip/network/arp.protocol"
+}
+
+func (p *protocol) StateFields() []string {
+	return []string{
+		"stack",
+		"options",
+	}
+}
+
+func (p *protocol) beforeSave() {}
+
+// +checklocksignore
+func (p *protocol) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.stack)
+	stateSinkObject.Save(1, &p.options)
+}
+
+func (p *protocol) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *protocol) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.stack)
+	stateSourceObject.Load(1, &p.options)
+}
+
+func (o *Options) StateTypeName() string {
+	return "pkg/tcpip/network/arp.Options"
+}
+
+func (o *Options) StateFields() []string {
+	return []string{
+		"DADConfigs",
+	}
+}
+
+func (o *Options) beforeSave() {}
+
+// +checklocksignore
+func (o *Options) StateSave(stateSinkObject state.Sink) {
+	o.beforeSave()
+	stateSinkObject.Save(0, &o.DADConfigs)
+}
+
+func (o *Options) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (o *Options) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &o.DADConfigs)
+}
+
+func (s *Stats) StateTypeName() string {
+	return "pkg/tcpip/network/arp.Stats"
+}
+
+func (s *Stats) StateFields() []string {
+	return []string{
+		"ARP",
+	}
+}
+
+func (s *Stats) beforeSave() {}
+
+// +checklocksignore
+func (s *Stats) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.ARP)
+}
+
+func (s *Stats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *Stats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.ARP)
+}
+
+func (s *sharedStats) StateTypeName() string {
+	return "pkg/tcpip/network/arp.sharedStats"
+}
+
+func (s *sharedStats) StateFields() []string {
+	return []string{
+		"localStats",
+		"arp",
+	}
+}
+
+func (s *sharedStats) beforeSave() {}
+
+// +checklocksignore
+func (s *sharedStats) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.localStats)
+	stateSinkObject.Save(1, &s.arp)
+}
+
+func (s *sharedStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *sharedStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.localStats)
+	stateSourceObject.Load(1, &s.arp)
+}
+
+func (m *multiCounterARPStats) StateTypeName() string {
+	return "pkg/tcpip/network/arp.multiCounterARPStats"
+}
+
+func (m *multiCounterARPStats) StateFields() []string {
+	return []string{
+		"packetsReceived",
+		"disabledPacketsReceived",
+		"malformedPacketsReceived",
+		"requestsReceived",
+		"requestsReceivedUnknownTargetAddress",
+		"outgoingRequestInterfaceHasNoLocalAddressErrors",
+		"outgoingRequestBadLocalAddressErrors",
+		"outgoingRequestsDropped",
+		"outgoingRequestsSent",
+		"repliesReceived",
+		"outgoingRepliesDropped",
+		"outgoingRepliesSent",
+	}
+}
+
+func (m *multiCounterARPStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterARPStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.packetsReceived)
+	stateSinkObject.Save(1, &m.disabledPacketsReceived)
+	stateSinkObject.Save(2, &m.malformedPacketsReceived)
+	stateSinkObject.Save(3, &m.requestsReceived)
+	stateSinkObject.Save(4, &m.requestsReceivedUnknownTargetAddress)
+	stateSinkObject.Save(5, &m.outgoingRequestInterfaceHasNoLocalAddressErrors)
+	stateSinkObject.Save(6, &m.outgoingRequestBadLocalAddressErrors)
+	stateSinkObject.Save(7, &m.outgoingRequestsDropped)
+	stateSinkObject.Save(8, &m.outgoingRequestsSent)
+	stateSinkObject.Save(9, &m.repliesReceived)
+	stateSinkObject.Save(10, &m.outgoingRepliesDropped)
+	stateSinkObject.Save(11, &m.outgoingRepliesSent)
+}
+
+func (m *multiCounterARPStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterARPStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.packetsReceived)
+	stateSourceObject.Load(1, &m.disabledPacketsReceived)
+	stateSourceObject.Load(2, &m.malformedPacketsReceived)
+	stateSourceObject.Load(3, &m.requestsReceived)
+	stateSourceObject.Load(4, &m.requestsReceivedUnknownTargetAddress)
+	stateSourceObject.Load(5, &m.outgoingRequestInterfaceHasNoLocalAddressErrors)
+	stateSourceObject.Load(6, &m.outgoingRequestBadLocalAddressErrors)
+	stateSourceObject.Load(7, &m.outgoingRequestsDropped)
+	stateSourceObject.Load(8, &m.outgoingRequestsSent)
+	stateSourceObject.Load(9, &m.repliesReceived)
+	stateSourceObject.Load(10, &m.outgoingRepliesDropped)
+	stateSourceObject.Load(11, &m.outgoingRepliesSent)
+}
+
+func init() {
+	state.Register((*endpoint)(nil))
+	state.Register((*protocol)(nil))
+	state.Register((*Options)(nil))
+	state.Register((*Stats)(nil))
+	state.Register((*sharedStats)(nil))
+	state.Register((*multiCounterARPStats)(nil))
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/stats.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/stats.go
index 6d7194c6..f4974217 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/stats.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/stats.go
@@ -22,6 +22,8 @@ import (
 var _ stack.NetworkEndpointStats = (*Stats)(nil)
 
 // Stats holds statistics related to ARP.
+//
+// +stateify savable
 type Stats struct {
 	// ARP holds ARP statistics.
 	ARP tcpip.ARPStats
@@ -30,6 +32,7 @@ type Stats struct {
 // IsNetworkEndpointStats implements stack.NetworkEndpointStats.
 func (*Stats) IsNetworkEndpointStats() {}
 
+// +stateify savable
 type sharedStats struct {
 	localStats Stats
 	arp        multiCounterARPStats
@@ -37,6 +40,7 @@ type sharedStats struct {
 
 // LINT.IfChange(multiCounterARPStats)
 
+// +stateify savable
 type multiCounterARPStats struct {
 	packetsReceived                                 tcpip.MultiCounterStat
 	disabledPacketsReceived                         tcpip.MultiCounterStat
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go
index 39dc5ad0..8697d4e4 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go
@@ -60,6 +60,8 @@ var (
 )
 
 // FragmentID is the identifier for a fragment.
+//
+// +stateify savable
 type FragmentID struct {
 	// Source is the source address of the fragment.
 	Source tcpip.Address
@@ -78,8 +80,10 @@ type FragmentID struct {
 
 // Fragmentation is the main structure that other modules
 // of the stack should use to implement IP Fragmentation.
+//
+// +stateify savable
 type Fragmentation struct {
-	mu             sync.Mutex
+	mu             sync.Mutex `state:"nosave"`
 	highLimit      int
 	lowLimit       int
 	reassemblers   map[FragmentID]*reassembler
@@ -97,7 +101,7 @@ type TimeoutHandler interface {
 	// OnReassemblyTimeout will be called with the first fragment (or nil, if the
 	// first fragment has not been received) of a packet whose reassembly has
 	// timed out.
-	OnReassemblyTimeout(pkt stack.PacketBufferPtr)
+	OnReassemblyTimeout(pkt *stack.PacketBuffer)
 }
 
 // NewFragmentation creates a new Fragmentation.
@@ -155,8 +159,8 @@ func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, rea
 // to be given here outside of the FragmentID struct because IPv6 should not use
 // the protocol to identify a fragment.
 func (f *Fragmentation) Process(
-	id FragmentID, first, last uint16, more bool, proto uint8, pkt stack.PacketBufferPtr) (
-	stack.PacketBufferPtr, uint8, bool, error) {
+	id FragmentID, first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (
+	*stack.PacketBuffer, uint8, bool, error) {
 	if first > last {
 		return nil, 0, false, fmt.Errorf("first=%d is greater than last=%d: %w", first, last, ErrInvalidArgs)
 	}
@@ -251,12 +255,12 @@ func (f *Fragmentation) release(r *reassembler, timedOut bool) {
 	if h := f.timeoutHandler; timedOut && h != nil {
 		h.OnReassemblyTimeout(r.pkt)
 	}
-	if !r.pkt.IsNil() {
+	if r.pkt != nil {
 		r.pkt.DecRef()
 		r.pkt = nil
 	}
 	for _, h := range r.holes {
-		if !h.pkt.IsNil() {
+		if h.pkt != nil {
 			h.pkt.DecRef()
 			h.pkt = nil
 		}
@@ -308,7 +312,7 @@ type PacketFragmenter struct {
 //
 // reserve is the number of bytes that should be reserved for the headers in
 // each generated fragment.
-func MakePacketFragmenter(pkt stack.PacketBufferPtr, fragmentPayloadLen uint32, reserve int) PacketFragmenter {
+func MakePacketFragmenter(pkt *stack.PacketBuffer, fragmentPayloadLen uint32, reserve int) PacketFragmenter {
 	// As per RFC 8200 Section 4.5, some IPv6 extension headers should not be
 	// repeated in each fragment. However we do not currently support any header
 	// of that kind yet, so the following computation is valid for both IPv4 and
@@ -339,7 +343,7 @@ func MakePacketFragmenter(pkt stack.PacketBufferPtr, fragmentPayloadLen uint32,
 // Note that the returned packet will not have its network and link headers
 // populated, but space for them will be reserved. The transport header will be
 // stored in the packet's data.
-func (pf *PacketFragmenter) BuildNextFragment() (stack.PacketBufferPtr, int, int, bool) {
+func (pf *PacketFragmenter) BuildNextFragment() (*stack.PacketBuffer, int, int, bool) {
 	if pf.currentFragment >= pf.fragmentCount {
 		panic("BuildNextFragment should not be called again after the last fragment was returned")
 	}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go
index 21c5774e..2697d9a4 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go
@@ -3,9 +3,183 @@
 package fragmentation
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
+func (f *FragmentID) StateTypeName() string {
+	return "pkg/tcpip/network/internal/fragmentation.FragmentID"
+}
+
+func (f *FragmentID) StateFields() []string {
+	return []string{
+		"Source",
+		"Destination",
+		"ID",
+		"Protocol",
+	}
+}
+
+func (f *FragmentID) beforeSave() {}
+
+// +checklocksignore
+func (f *FragmentID) StateSave(stateSinkObject state.Sink) {
+	f.beforeSave()
+	stateSinkObject.Save(0, &f.Source)
+	stateSinkObject.Save(1, &f.Destination)
+	stateSinkObject.Save(2, &f.ID)
+	stateSinkObject.Save(3, &f.Protocol)
+}
+
+func (f *FragmentID) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (f *FragmentID) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &f.Source)
+	stateSourceObject.Load(1, &f.Destination)
+	stateSourceObject.Load(2, &f.ID)
+	stateSourceObject.Load(3, &f.Protocol)
+}
+
+func (f *Fragmentation) StateTypeName() string {
+	return "pkg/tcpip/network/internal/fragmentation.Fragmentation"
+}
+
+func (f *Fragmentation) StateFields() []string {
+	return []string{
+		"highLimit",
+		"lowLimit",
+		"reassemblers",
+		"rList",
+		"memSize",
+		"timeout",
+		"blockSize",
+		"clock",
+		"releaseJob",
+		"timeoutHandler",
+	}
+}
+
+func (f *Fragmentation) beforeSave() {}
+
+// +checklocksignore
+func (f *Fragmentation) StateSave(stateSinkObject state.Sink) {
+	f.beforeSave()
+	stateSinkObject.Save(0, &f.highLimit)
+	stateSinkObject.Save(1, &f.lowLimit)
+	stateSinkObject.Save(2, &f.reassemblers)
+	stateSinkObject.Save(3, &f.rList)
+	stateSinkObject.Save(4, &f.memSize)
+	stateSinkObject.Save(5, &f.timeout)
+	stateSinkObject.Save(6, &f.blockSize)
+	stateSinkObject.Save(7, &f.clock)
+	stateSinkObject.Save(8, &f.releaseJob)
+	stateSinkObject.Save(9, &f.timeoutHandler)
+}
+
+func (f *Fragmentation) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (f *Fragmentation) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &f.highLimit)
+	stateSourceObject.Load(1, &f.lowLimit)
+	stateSourceObject.Load(2, &f.reassemblers)
+	stateSourceObject.Load(3, &f.rList)
+	stateSourceObject.Load(4, &f.memSize)
+	stateSourceObject.Load(5, &f.timeout)
+	stateSourceObject.Load(6, &f.blockSize)
+	stateSourceObject.Load(7, &f.clock)
+	stateSourceObject.Load(8, &f.releaseJob)
+	stateSourceObject.Load(9, &f.timeoutHandler)
+}
+
+func (h *hole) StateTypeName() string {
+	return "pkg/tcpip/network/internal/fragmentation.hole"
+}
+
+func (h *hole) StateFields() []string {
+	return []string{
+		"first",
+		"last",
+		"filled",
+		"final",
+		"pkt",
+	}
+}
+
+func (h *hole) beforeSave() {}
+
+// +checklocksignore
+func (h *hole) StateSave(stateSinkObject state.Sink) {
+	h.beforeSave()
+	stateSinkObject.Save(0, &h.first)
+	stateSinkObject.Save(1, &h.last)
+	stateSinkObject.Save(2, &h.filled)
+	stateSinkObject.Save(3, &h.final)
+	stateSinkObject.Save(4, &h.pkt)
+}
+
+func (h *hole) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (h *hole) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &h.first)
+	stateSourceObject.Load(1, &h.last)
+	stateSourceObject.Load(2, &h.filled)
+	stateSourceObject.Load(3, &h.final)
+	stateSourceObject.Load(4, &h.pkt)
+}
+
+func (r *reassembler) StateTypeName() string {
+	return "pkg/tcpip/network/internal/fragmentation.reassembler"
+}
+
+func (r *reassembler) StateFields() []string {
+	return []string{
+		"reassemblerEntry",
+		"id",
+		"memSize",
+		"proto",
+		"holes",
+		"filled",
+		"done",
+		"createdAt",
+		"pkt",
+	}
+}
+
+func (r *reassembler) beforeSave() {}
+
+// +checklocksignore
+func (r *reassembler) StateSave(stateSinkObject state.Sink) {
+	r.beforeSave()
+	stateSinkObject.Save(0, &r.reassemblerEntry)
+	stateSinkObject.Save(1, &r.id)
+	stateSinkObject.Save(2, &r.memSize)
+	stateSinkObject.Save(3, &r.proto)
+	stateSinkObject.Save(4, &r.holes)
+	stateSinkObject.Save(5, &r.filled)
+	stateSinkObject.Save(6, &r.done)
+	stateSinkObject.Save(7, &r.createdAt)
+	stateSinkObject.Save(8, &r.pkt)
+}
+
+func (r *reassembler) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (r *reassembler) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &r.reassemblerEntry)
+	stateSourceObject.Load(1, &r.id)
+	stateSourceObject.Load(2, &r.memSize)
+	stateSourceObject.Load(3, &r.proto)
+	stateSourceObject.Load(4, &r.holes)
+	stateSourceObject.Load(5, &r.filled)
+	stateSourceObject.Load(6, &r.done)
+	stateSourceObject.Load(7, &r.createdAt)
+	stateSourceObject.Load(8, &r.pkt)
+}
+
 func (l *reassemblerList) StateTypeName() string {
 	return "pkg/tcpip/network/internal/fragmentation.reassemblerList"
 }
@@ -26,10 +200,10 @@ func (l *reassemblerList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *reassemblerList) afterLoad() {}
+func (l *reassemblerList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *reassemblerList) StateLoad(stateSourceObject state.Source) {
+func (l *reassemblerList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
@@ -54,15 +228,19 @@ func (e *reassemblerEntry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *reassemblerEntry) afterLoad() {}
+func (e *reassemblerEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *reassemblerEntry) StateLoad(stateSourceObject state.Source) {
+func (e *reassemblerEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
 
 func init() {
+	state.Register((*FragmentID)(nil))
+	state.Register((*Fragmentation)(nil))
+	state.Register((*hole)(nil))
+	state.Register((*reassembler)(nil))
 	state.Register((*reassemblerList)(nil))
 	state.Register((*reassemblerEntry)(nil))
 }
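
Because fragment packets are now held as plain *stack.PacketBuffer values, emptiness checks in release (and in the reassembler below) become ordinary nil comparisons, still paired with DecRef to drop the held reference. A condensed sketch of that ownership pattern using the hole type from this package; the releaseHoles helper is illustrative:

// releaseHoles drops any packet reference still held by a reassembler's holes.
func releaseHoles(holes []hole) {
	for i := range holes {
		if holes[i].pkt != nil {
			holes[i].pkt.DecRef() // give up our reference
			holes[i].pkt = nil    // forget the packet so it is not reused
		}
	}
}
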
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go
index 873e034f..9aaad763 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go
@@ -23,6 +23,7 @@ import (
 	"gvisor.dev/gvisor/pkg/tcpip/stack"
 )
 
+// +stateify savable
 type hole struct {
 	first  uint16
 	last   uint16
@@ -30,20 +31,21 @@ type hole struct {
 	final  bool
 	// pkt is the fragment packet if hole is filled. We keep the whole pkt rather
 	// than the fragmented payload to prevent binding to specific buffer types.
-	pkt stack.PacketBufferPtr
+	pkt *stack.PacketBuffer
 }
 
+// +stateify savable
 type reassembler struct {
 	reassemblerEntry
 	id        FragmentID
 	memSize   int
 	proto     uint8
-	mu        sync.Mutex
+	mu        sync.Mutex `state:"nosave"`
 	holes     []hole
 	filled    int
 	done      bool
 	createdAt tcpip.MonotonicTime
-	pkt       stack.PacketBufferPtr
+	pkt       *stack.PacketBuffer
 }
 
 func newReassembler(id FragmentID, clock tcpip.Clock) *reassembler {
@@ -60,7 +62,7 @@ func newReassembler(id FragmentID, clock tcpip.Clock) *reassembler {
 	return r
 }
 
-func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt stack.PacketBufferPtr) (stack.PacketBufferPtr, uint8, bool, int, error) {
+func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (*stack.PacketBuffer, uint8, bool, int, error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	if r.done {
@@ -145,7 +147,7 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt st
 		// options received in the first fragment should be used - and they should
 		// override options from following fragments.
 		if first == 0 {
-			if !r.pkt.IsNil() {
+			if r.pkt != nil {
 				r.pkt.DecRef()
 			}
 			r.pkt = pkt.IncRef()
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/duplicate_address_detection.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/duplicate_address_detection.go
index 5123b7d6..66661f3c 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/duplicate_address_detection.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/duplicate_address_detection.go
@@ -33,6 +33,7 @@ const (
 	extended
 )
 
+// +stateify savable
 type dadState struct {
 	nonce         []byte
 	extendRequest extendRequest
@@ -50,9 +51,12 @@ type DADProtocol interface {
 }
 
 // DADOptions holds options for DAD.
+//
+// +stateify savable
 type DADOptions struct {
-	Clock              tcpip.Clock
-	SecureRNG          io.Reader
+	Clock tcpip.Clock
+	// TODO(b/341946753): Restore when netstack is savable.
+	SecureRNG          io.Reader `state:"nosave"`
 	NonceSize          uint8
 	ExtendDADTransmits uint8
 	Protocol           DADProtocol
@@ -60,11 +64,13 @@ type DADOptions struct {
 }
 
 // DAD performs duplicate address detection for addresses.
+//
+// +stateify savable
 type DAD struct {
 	opts    DADOptions
 	configs stack.DADConfigurations
 
-	protocolMU sync.Locker
+	protocolMU sync.Locker `state:"nosave"`
 	addresses  map[tcpip.Address]dadState
 }
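
Fields that cannot survive a save/restore cycle, such as the protocolMU locker here and the SecureRNG reader in DADOptions, carry a state:"nosave" tag, so the generated StateFields lists in ip_state_autogen.go below simply omit them. A small sketch of how such a type pairs with its generated metadata; the timerState type and its fields are illustrative and assume the vendored sync and tcpip packages:

package example

import (
	"gvisor.dev/gvisor/pkg/sync"
	"gvisor.dev/gvisor/pkg/tcpip"
)

// timerState is illustrative: the mutex is tagged nosave, so stateify would
// skip it when generating save/restore code.
type timerState struct {
	mu       sync.Mutex `state:"nosave"`
	deadline tcpip.MonotonicTime
}

// StateFields mirrors the shape of the generated metadata: only savable
// fields are listed.
func (t *timerState) StateFields() []string {
	return []string{
		"deadline", // mu is omitted because of its nosave tag
	}
}
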
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go
index b381c4c0..c99a4fe2 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go
@@ -20,7 +20,7 @@ import (
 	"gvisor.dev/gvisor/pkg/tcpip"
 )
 
-// ForwardingError represents an error that occured while trying to forward
+// ForwardingError represents an error that occurred while trying to forward
 // a packet.
 type ForwardingError interface {
 	isForwardingError()
@@ -75,7 +75,7 @@ func (*ErrLinkLocalDestinationAddress) isForwardingError() {}
 
 func (*ErrLinkLocalDestinationAddress) String() string { return "link local destination address" }
 
-// ErrHostUnreachable indicates that the destinatino host could not be reached.
+// ErrHostUnreachable indicates that the destination host could not be reached.
 type ErrHostUnreachable struct{}
 
 func (*ErrHostUnreachable) isForwardingError() {}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go
index 884ea056..3e7ca67e 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go
@@ -105,6 +105,8 @@ const (
 
 // multicastGroupState holds the Generic Multicast Protocol state for a
 // multicast group.
+//
+// +stateify savable
 type multicastGroupState struct {
 	// joins is the number of times the group has been joined.
 	joins uint64
@@ -130,7 +132,8 @@ type multicastGroupState struct {
 	// delayedReportJobFiresAt is the time when the delayed report job will fire.
 	//
 	// A zero value indicates that the job is not scheduled.
-	delayedReportJobFiresAt time.Time
+	// TODO(b/341946753): Restore when netstack is savable.
+	delayedReportJobFiresAt time.Time `state:"nosave"`
 
 	// queriedIncludeSources holds sources that were queried for.
 	//
@@ -155,9 +158,12 @@ func (m *multicastGroupState) clearQueriedIncludeSources() {
 
 // GenericMulticastProtocolOptions holds options for the generic multicast
 // protocol.
+//
+// +stateify savable
 type GenericMulticastProtocolOptions struct {
 	// Rand is the source of random numbers.
-	Rand *rand.Rand
+	// TODO(b/341946753): Restore when netstack is savable.
+	Rand *rand.Rand `state:"nosave"`
 
 	// Clock is the clock used to create timers.
 	Clock tcpip.Clock
@@ -267,9 +273,11 @@ const (
 //
 // GenericMulticastProtocolState.MakeAllNonMemberLocked MUST be called when the
 // multicast group protocol is disabled so that leave messages may be sent.
+//
+// +stateify savable
 type GenericMulticastProtocolState struct {
 	// Do not allow overwriting this state.
-	_ sync.NoCopy
+	_ sync.NoCopy `state:"nosave"`
 
 	opts GenericMulticastProtocolOptions
 
@@ -277,7 +285,7 @@ type GenericMulticastProtocolState struct {
 	memberships map[tcpip.Address]multicastGroupState
 
 	// protocolMU is the mutex used to protect the protocol.
-	protocolMU *sync.RWMutex
+	protocolMU *sync.RWMutex `state:"nosave"`
 
 	// V2 state.
 	robustnessVariable uint8
@@ -285,8 +293,9 @@ type GenericMulticastProtocolState struct {
 	mode               protocolMode
 	modeTimer          tcpip.Timer
 
-	generalQueryV2Timer        tcpip.Timer
-	generalQueryV2TimerFiresAt time.Time
+	generalQueryV2Timer tcpip.Timer
+	// TODO(b/341946753): Restore when netstack is savable.
+	generalQueryV2TimerFiresAt time.Time `state:"nosave"`
 
 	stateChangedReportV2Timer    tcpip.Timer
 	stateChangedReportV2TimerSet bool
@@ -390,7 +399,9 @@ func (g *GenericMulticastProtocolState) MakeAllNonMemberLocked() {
 	switch g.mode {
 	case protocolModeV2:
 		v2ReportBuilder = g.opts.Protocol.NewReportV2Builder()
-		handler = func(groupAddress tcpip.Address, _ *multicastGroupState) {
+		handler = func(groupAddress tcpip.Address, info *multicastGroupState) {
+			info.cancelDelayedReportJob()
+
 			// Send a report immediately to announce us leaving the group.
 			v2ReportBuilder.AddRecord(
 				MulticastGroupProtocolV2ReportRecordChangeToIncludeMode,
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/ip_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/ip_state_autogen.go
index 4ca3d838..96a7a77a 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/ip_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/ip_state_autogen.go
@@ -3,9 +3,116 @@
 package ip
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
+func (d *dadState) StateTypeName() string {
+	return "pkg/tcpip/network/internal/ip.dadState"
+}
+
+func (d *dadState) StateFields() []string {
+	return []string{
+		"nonce",
+		"extendRequest",
+		"done",
+		"timer",
+		"completionHandlers",
+	}
+}
+
+func (d *dadState) beforeSave() {}
+
+// +checklocksignore
+func (d *dadState) StateSave(stateSinkObject state.Sink) {
+	d.beforeSave()
+	stateSinkObject.Save(0, &d.nonce)
+	stateSinkObject.Save(1, &d.extendRequest)
+	stateSinkObject.Save(2, &d.done)
+	stateSinkObject.Save(3, &d.timer)
+	stateSinkObject.Save(4, &d.completionHandlers)
+}
+
+func (d *dadState) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (d *dadState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &d.nonce)
+	stateSourceObject.Load(1, &d.extendRequest)
+	stateSourceObject.Load(2, &d.done)
+	stateSourceObject.Load(3, &d.timer)
+	stateSourceObject.Load(4, &d.completionHandlers)
+}
+
+func (d *DADOptions) StateTypeName() string {
+	return "pkg/tcpip/network/internal/ip.DADOptions"
+}
+
+func (d *DADOptions) StateFields() []string {
+	return []string{
+		"Clock",
+		"NonceSize",
+		"ExtendDADTransmits",
+		"Protocol",
+		"NICID",
+	}
+}
+
+func (d *DADOptions) beforeSave() {}
+
+// +checklocksignore
+func (d *DADOptions) StateSave(stateSinkObject state.Sink) {
+	d.beforeSave()
+	stateSinkObject.Save(0, &d.Clock)
+	stateSinkObject.Save(1, &d.NonceSize)
+	stateSinkObject.Save(2, &d.ExtendDADTransmits)
+	stateSinkObject.Save(3, &d.Protocol)
+	stateSinkObject.Save(4, &d.NICID)
+}
+
+func (d *DADOptions) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (d *DADOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &d.Clock)
+	stateSourceObject.Load(1, &d.NonceSize)
+	stateSourceObject.Load(2, &d.ExtendDADTransmits)
+	stateSourceObject.Load(3, &d.Protocol)
+	stateSourceObject.Load(4, &d.NICID)
+}
+
+func (d *DAD) StateTypeName() string {
+	return "pkg/tcpip/network/internal/ip.DAD"
+}
+
+func (d *DAD) StateFields() []string {
+	return []string{
+		"opts",
+		"configs",
+		"addresses",
+	}
+}
+
+func (d *DAD) beforeSave() {}
+
+// +checklocksignore
+func (d *DAD) StateSave(stateSinkObject state.Sink) {
+	d.beforeSave()
+	stateSinkObject.Save(0, &d.opts)
+	stateSinkObject.Save(1, &d.configs)
+	stateSinkObject.Save(2, &d.addresses)
+}
+
+func (d *DAD) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (d *DAD) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &d.opts)
+	stateSourceObject.Load(1, &d.configs)
+	stateSourceObject.Load(2, &d.addresses)
+}
+
 func (e *ErrMessageTooLong) StateTypeName() string {
 	return "pkg/tcpip/network/internal/ip.ErrMessageTooLong"
 }
@@ -21,10 +128,10 @@ func (e *ErrMessageTooLong) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrMessageTooLong) afterLoad() {}
+func (e *ErrMessageTooLong) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrMessageTooLong) StateLoad(stateSourceObject state.Source) {
+func (e *ErrMessageTooLong) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrNoMulticastPendingQueueBufferSpace) StateTypeName() string {
@@ -42,13 +149,284 @@ func (e *ErrNoMulticastPendingQueueBufferSpace) StateSave(stateSinkObject state.
 	e.beforeSave()
 }
 
-func (e *ErrNoMulticastPendingQueueBufferSpace) afterLoad() {}
+func (e *ErrNoMulticastPendingQueueBufferSpace) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (e *ErrNoMulticastPendingQueueBufferSpace) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+}
+
+func (m *multicastGroupState) StateTypeName() string {
+	return "pkg/tcpip/network/internal/ip.multicastGroupState"
+}
+
+func (m *multicastGroupState) StateFields() []string {
+	return []string{
+		"joins",
+		"transmissionLeft",
+		"lastToSendReport",
+		"delayedReportJob",
+		"queriedIncludeSources",
+		"deleteScheduled",
+	}
+}
+
+func (m *multicastGroupState) beforeSave() {}
+
+// +checklocksignore
+func (m *multicastGroupState) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.joins)
+	stateSinkObject.Save(1, &m.transmissionLeft)
+	stateSinkObject.Save(2, &m.lastToSendReport)
+	stateSinkObject.Save(3, &m.delayedReportJob)
+	stateSinkObject.Save(4, &m.queriedIncludeSources)
+	stateSinkObject.Save(5, &m.deleteScheduled)
+}
+
+func (m *multicastGroupState) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multicastGroupState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.joins)
+	stateSourceObject.Load(1, &m.transmissionLeft)
+	stateSourceObject.Load(2, &m.lastToSendReport)
+	stateSourceObject.Load(3, &m.delayedReportJob)
+	stateSourceObject.Load(4, &m.queriedIncludeSources)
+	stateSourceObject.Load(5, &m.deleteScheduled)
+}
+
+func (g *GenericMulticastProtocolOptions) StateTypeName() string {
+	return "pkg/tcpip/network/internal/ip.GenericMulticastProtocolOptions"
+}
+
+func (g *GenericMulticastProtocolOptions) StateFields() []string {
+	return []string{
+		"Clock",
+		"Protocol",
+		"MaxUnsolicitedReportDelay",
+	}
+}
+
+func (g *GenericMulticastProtocolOptions) beforeSave() {}
+
+// +checklocksignore
+func (g *GenericMulticastProtocolOptions) StateSave(stateSinkObject state.Sink) {
+	g.beforeSave()
+	stateSinkObject.Save(0, &g.Clock)
+	stateSinkObject.Save(1, &g.Protocol)
+	stateSinkObject.Save(2, &g.MaxUnsolicitedReportDelay)
+}
+
+func (g *GenericMulticastProtocolOptions) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (g *GenericMulticastProtocolOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &g.Clock)
+	stateSourceObject.Load(1, &g.Protocol)
+	stateSourceObject.Load(2, &g.MaxUnsolicitedReportDelay)
+}
+
+func (g *GenericMulticastProtocolState) StateTypeName() string {
+	return "pkg/tcpip/network/internal/ip.GenericMulticastProtocolState"
+}
+
+func (g *GenericMulticastProtocolState) StateFields() []string {
+	return []string{
+		"opts",
+		"memberships",
+		"robustnessVariable",
+		"queryInterval",
+		"mode",
+		"modeTimer",
+		"generalQueryV2Timer",
+		"stateChangedReportV2Timer",
+		"stateChangedReportV2TimerSet",
+	}
+}
+
+func (g *GenericMulticastProtocolState) beforeSave() {}
+
+// +checklocksignore
+func (g *GenericMulticastProtocolState) StateSave(stateSinkObject state.Sink) {
+	g.beforeSave()
+	stateSinkObject.Save(0, &g.opts)
+	stateSinkObject.Save(1, &g.memberships)
+	stateSinkObject.Save(2, &g.robustnessVariable)
+	stateSinkObject.Save(3, &g.queryInterval)
+	stateSinkObject.Save(4, &g.mode)
+	stateSinkObject.Save(5, &g.modeTimer)
+	stateSinkObject.Save(6, &g.generalQueryV2Timer)
+	stateSinkObject.Save(7, &g.stateChangedReportV2Timer)
+	stateSinkObject.Save(8, &g.stateChangedReportV2TimerSet)
+}
+
+func (g *GenericMulticastProtocolState) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (g *GenericMulticastProtocolState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &g.opts)
+	stateSourceObject.Load(1, &g.memberships)
+	stateSourceObject.Load(2, &g.robustnessVariable)
+	stateSourceObject.Load(3, &g.queryInterval)
+	stateSourceObject.Load(4, &g.mode)
+	stateSourceObject.Load(5, &g.modeTimer)
+	stateSourceObject.Load(6, &g.generalQueryV2Timer)
+	stateSourceObject.Load(7, &g.stateChangedReportV2Timer)
+	stateSourceObject.Load(8, &g.stateChangedReportV2TimerSet)
+}
+
+func (m *MultiCounterIPForwardingStats) StateTypeName() string {
+	return "pkg/tcpip/network/internal/ip.MultiCounterIPForwardingStats"
+}
+
+func (m *MultiCounterIPForwardingStats) StateFields() []string {
+	return []string{
+		"Unrouteable",
+		"ExhaustedTTL",
+		"InitializingSource",
+		"LinkLocalSource",
+		"LinkLocalDestination",
+		"PacketTooBig",
+		"HostUnreachable",
+		"ExtensionHeaderProblem",
+		"UnexpectedMulticastInputInterface",
+		"UnknownOutputEndpoint",
+		"NoMulticastPendingQueueBufferSpace",
+		"OutgoingDeviceNoBufferSpace",
+		"Errors",
+	}
+}
+
+func (m *MultiCounterIPForwardingStats) beforeSave() {}
+
+// +checklocksignore
+func (m *MultiCounterIPForwardingStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.Unrouteable)
+	stateSinkObject.Save(1, &m.ExhaustedTTL)
+	stateSinkObject.Save(2, &m.InitializingSource)
+	stateSinkObject.Save(3, &m.LinkLocalSource)
+	stateSinkObject.Save(4, &m.LinkLocalDestination)
+	stateSinkObject.Save(5, &m.PacketTooBig)
+	stateSinkObject.Save(6, &m.HostUnreachable)
+	stateSinkObject.Save(7, &m.ExtensionHeaderProblem)
+	stateSinkObject.Save(8, &m.UnexpectedMulticastInputInterface)
+	stateSinkObject.Save(9, &m.UnknownOutputEndpoint)
+	stateSinkObject.Save(10, &m.NoMulticastPendingQueueBufferSpace)
+	stateSinkObject.Save(11, &m.OutgoingDeviceNoBufferSpace)
+	stateSinkObject.Save(12, &m.Errors)
+}
+
+func (m *MultiCounterIPForwardingStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *MultiCounterIPForwardingStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.Unrouteable)
+	stateSourceObject.Load(1, &m.ExhaustedTTL)
+	stateSourceObject.Load(2, &m.InitializingSource)
+	stateSourceObject.Load(3, &m.LinkLocalSource)
+	stateSourceObject.Load(4, &m.LinkLocalDestination)
+	stateSourceObject.Load(5, &m.PacketTooBig)
+	stateSourceObject.Load(6, &m.HostUnreachable)
+	stateSourceObject.Load(7, &m.ExtensionHeaderProblem)
+	stateSourceObject.Load(8, &m.UnexpectedMulticastInputInterface)
+	stateSourceObject.Load(9, &m.UnknownOutputEndpoint)
+	stateSourceObject.Load(10, &m.NoMulticastPendingQueueBufferSpace)
+	stateSourceObject.Load(11, &m.OutgoingDeviceNoBufferSpace)
+	stateSourceObject.Load(12, &m.Errors)
+}
+
+func (m *MultiCounterIPStats) StateTypeName() string {
+	return "pkg/tcpip/network/internal/ip.MultiCounterIPStats"
+}
+
+func (m *MultiCounterIPStats) StateFields() []string {
+	return []string{
+		"PacketsReceived",
+		"ValidPacketsReceived",
+		"DisabledPacketsReceived",
+		"InvalidDestinationAddressesReceived",
+		"InvalidSourceAddressesReceived",
+		"PacketsDelivered",
+		"PacketsSent",
+		"OutgoingPacketErrors",
+		"MalformedPacketsReceived",
+		"MalformedFragmentsReceived",
+		"IPTablesPreroutingDropped",
+		"IPTablesInputDropped",
+		"IPTablesForwardDropped",
+		"IPTablesOutputDropped",
+		"IPTablesPostroutingDropped",
+		"OptionTimestampReceived",
+		"OptionRecordRouteReceived",
+		"OptionRouterAlertReceived",
+		"OptionUnknownReceived",
+		"Forwarding",
+	}
+}
+
+func (m *MultiCounterIPStats) beforeSave() {}
+
+// +checklocksignore
+func (m *MultiCounterIPStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.PacketsReceived)
+	stateSinkObject.Save(1, &m.ValidPacketsReceived)
+	stateSinkObject.Save(2, &m.DisabledPacketsReceived)
+	stateSinkObject.Save(3, &m.InvalidDestinationAddressesReceived)
+	stateSinkObject.Save(4, &m.InvalidSourceAddressesReceived)
+	stateSinkObject.Save(5, &m.PacketsDelivered)
+	stateSinkObject.Save(6, &m.PacketsSent)
+	stateSinkObject.Save(7, &m.OutgoingPacketErrors)
+	stateSinkObject.Save(8, &m.MalformedPacketsReceived)
+	stateSinkObject.Save(9, &m.MalformedFragmentsReceived)
+	stateSinkObject.Save(10, &m.IPTablesPreroutingDropped)
+	stateSinkObject.Save(11, &m.IPTablesInputDropped)
+	stateSinkObject.Save(12, &m.IPTablesForwardDropped)
+	stateSinkObject.Save(13, &m.IPTablesOutputDropped)
+	stateSinkObject.Save(14, &m.IPTablesPostroutingDropped)
+	stateSinkObject.Save(15, &m.OptionTimestampReceived)
+	stateSinkObject.Save(16, &m.OptionRecordRouteReceived)
+	stateSinkObject.Save(17, &m.OptionRouterAlertReceived)
+	stateSinkObject.Save(18, &m.OptionUnknownReceived)
+	stateSinkObject.Save(19, &m.Forwarding)
+}
+
+func (m *MultiCounterIPStats) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrNoMulticastPendingQueueBufferSpace) StateLoad(stateSourceObject state.Source) {
+func (m *MultiCounterIPStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.PacketsReceived)
+	stateSourceObject.Load(1, &m.ValidPacketsReceived)
+	stateSourceObject.Load(2, &m.DisabledPacketsReceived)
+	stateSourceObject.Load(3, &m.InvalidDestinationAddressesReceived)
+	stateSourceObject.Load(4, &m.InvalidSourceAddressesReceived)
+	stateSourceObject.Load(5, &m.PacketsDelivered)
+	stateSourceObject.Load(6, &m.PacketsSent)
+	stateSourceObject.Load(7, &m.OutgoingPacketErrors)
+	stateSourceObject.Load(8, &m.MalformedPacketsReceived)
+	stateSourceObject.Load(9, &m.MalformedFragmentsReceived)
+	stateSourceObject.Load(10, &m.IPTablesPreroutingDropped)
+	stateSourceObject.Load(11, &m.IPTablesInputDropped)
+	stateSourceObject.Load(12, &m.IPTablesForwardDropped)
+	stateSourceObject.Load(13, &m.IPTablesOutputDropped)
+	stateSourceObject.Load(14, &m.IPTablesPostroutingDropped)
+	stateSourceObject.Load(15, &m.OptionTimestampReceived)
+	stateSourceObject.Load(16, &m.OptionRecordRouteReceived)
+	stateSourceObject.Load(17, &m.OptionRouterAlertReceived)
+	stateSourceObject.Load(18, &m.OptionUnknownReceived)
+	stateSourceObject.Load(19, &m.Forwarding)
 }
 
 func init() {
+	state.Register((*dadState)(nil))
+	state.Register((*DADOptions)(nil))
+	state.Register((*DAD)(nil))
 	state.Register((*ErrMessageTooLong)(nil))
 	state.Register((*ErrNoMulticastPendingQueueBufferSpace)(nil))
+	state.Register((*multicastGroupState)(nil))
+	state.Register((*GenericMulticastProtocolOptions)(nil))
+	state.Register((*GenericMulticastProtocolState)(nil))
+	state.Register((*MultiCounterIPForwardingStats)(nil))
+	state.Register((*MultiCounterIPStats)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go
index d1e112a0..85990f5d 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go
@@ -20,6 +20,8 @@ import "gvisor.dev/gvisor/pkg/tcpip"
 
 // MultiCounterIPForwardingStats holds IP forwarding statistics. Each counter
 // may have several versions.
+//
+// +stateify savable
 type MultiCounterIPForwardingStats struct {
 	// Unrouteable is the number of IP packets received which were dropped
 	// because the netstack could not construct a route to their
@@ -66,7 +68,7 @@ type MultiCounterIPForwardingStats struct {
 	UnknownOutputEndpoint tcpip.MultiCounterStat
 
 	// NoMulticastPendingQueueBufferSpace is the number of multicast packets that
-	// were dropped due to insufficent buffer space in the pending packet queue.
+	// were dropped due to insufficient buffer space in the pending packet queue.
 	NoMulticastPendingQueueBufferSpace tcpip.MultiCounterStat
 
 	// OutgoingDeviceNoBufferSpace is the number of packets that were dropped due
@@ -101,6 +103,8 @@ func (m *MultiCounterIPForwardingStats) Init(a, b *tcpip.IPForwardingStats) {
 
 // MultiCounterIPStats holds IP statistics, each counter may have several
 // versions.
+//
+// +stateify savable
 type MultiCounterIPStats struct {
 	// PacketsReceived is the number of IP packets received from the link
 	// layer.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/multicast_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/multicast_state_autogen.go
index ad936ca4..ecf8fc26 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/multicast_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/multicast_state_autogen.go
@@ -1,3 +1,137 @@
 // automatically generated by stateify.
 
 package multicast
+
+import (
+	"context"
+
+	"gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *RouteTable) StateTypeName() string {
+	return "pkg/tcpip/network/internal/multicast.RouteTable"
+}
+
+func (r *RouteTable) StateFields() []string {
+	return []string{
+		"installedRoutes",
+		"pendingRoutes",
+		"cleanupPendingRoutesTimer",
+		"isCleanupRoutineRunning",
+		"config",
+	}
+}
+
+func (r *RouteTable) beforeSave() {}
+
+// +checklocksignore
+func (r *RouteTable) StateSave(stateSinkObject state.Sink) {
+	r.beforeSave()
+	stateSinkObject.Save(0, &r.installedRoutes)
+	stateSinkObject.Save(1, &r.pendingRoutes)
+	stateSinkObject.Save(2, &r.cleanupPendingRoutesTimer)
+	stateSinkObject.Save(3, &r.isCleanupRoutineRunning)
+	stateSinkObject.Save(4, &r.config)
+}
+
+func (r *RouteTable) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (r *RouteTable) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &r.installedRoutes)
+	stateSourceObject.Load(1, &r.pendingRoutes)
+	stateSourceObject.Load(2, &r.cleanupPendingRoutesTimer)
+	stateSourceObject.Load(3, &r.isCleanupRoutineRunning)
+	stateSourceObject.Load(4, &r.config)
+}
+
+func (r *InstalledRoute) StateTypeName() string {
+	return "pkg/tcpip/network/internal/multicast.InstalledRoute"
+}
+
+func (r *InstalledRoute) StateFields() []string {
+	return []string{
+		"MulticastRoute",
+		"lastUsedTimestamp",
+	}
+}
+
+func (r *InstalledRoute) beforeSave() {}
+
+// +checklocksignore
+func (r *InstalledRoute) StateSave(stateSinkObject state.Sink) {
+	r.beforeSave()
+	stateSinkObject.Save(0, &r.MulticastRoute)
+	stateSinkObject.Save(1, &r.lastUsedTimestamp)
+}
+
+func (r *InstalledRoute) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (r *InstalledRoute) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &r.MulticastRoute)
+	stateSourceObject.Load(1, &r.lastUsedTimestamp)
+}
+
+func (p *PendingRoute) StateTypeName() string {
+	return "pkg/tcpip/network/internal/multicast.PendingRoute"
+}
+
+func (p *PendingRoute) StateFields() []string {
+	return []string{
+		"packets",
+		"expiration",
+	}
+}
+
+func (p *PendingRoute) beforeSave() {}
+
+// +checklocksignore
+func (p *PendingRoute) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.packets)
+	stateSinkObject.Save(1, &p.expiration)
+}
+
+func (p *PendingRoute) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *PendingRoute) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.packets)
+	stateSourceObject.Load(1, &p.expiration)
+}
+
+func (c *Config) StateTypeName() string {
+	return "pkg/tcpip/network/internal/multicast.Config"
+}
+
+func (c *Config) StateFields() []string {
+	return []string{
+		"MaxPendingQueueSize",
+		"Clock",
+	}
+}
+
+func (c *Config) beforeSave() {}
+
+// +checklocksignore
+func (c *Config) StateSave(stateSinkObject state.Sink) {
+	c.beforeSave()
+	stateSinkObject.Save(0, &c.MaxPendingQueueSize)
+	stateSinkObject.Save(1, &c.Clock)
+}
+
+func (c *Config) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (c *Config) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &c.MaxPendingQueueSize)
+	stateSourceObject.Load(1, &c.Clock)
+}
+
+func init() {
+	state.Register((*RouteTable)(nil))
+	state.Register((*InstalledRoute)(nil))
+	state.Register((*PendingRoute)(nil))
+	state.Register((*Config)(nil))
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/route_table.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/route_table.go
index 5bade5ae..d74aa31f 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/route_table.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/route_table.go
@@ -26,6 +26,8 @@ import (
 )
 
 // RouteTable represents a multicast routing table.
+//
+// +stateify savable
 type RouteTable struct {
 	// Internally, installed and pending routes are stored and locked separately
 	// A couple of reasons for structuring the table this way:
@@ -43,13 +45,13 @@ type RouteTable struct {
 	// lock. This ensures that installed routes can continue to be read even when
 	// the pending routes are write locked.
 
-	installedMu sync.RWMutex
+	installedMu sync.RWMutex `state:"nosave"`
 	// Maintaining pointers ensures that the installed routes are exclusively
 	// locked only when a route is being installed.
 	// +checklocks:installedMu
 	installedRoutes map[stack.UnicastSourceAndMulticastDestination]*InstalledRoute
 
-	pendingMu sync.RWMutex
+	pendingMu sync.RWMutex `state:"nosave"`
 	// +checklocks:pendingMu
 	pendingRoutes map[stack.UnicastSourceAndMulticastDestination]PendingRoute
 	// cleanupPendingRoutesTimer is a timer that triggers a routine to remove
@@ -79,10 +81,12 @@ var (
 //
 // If a route is in the installed state, then it may be used to forward
 // multicast packets.
+//
+// +stateify savable
 type InstalledRoute struct {
 	stack.MulticastRoute
 
-	lastUsedTimestampMu sync.RWMutex
+	lastUsedTimestampMu sync.RWMutex `state:"nosave"`
 	// +checklocks:lastUsedTimestampMu
 	lastUsedTimestamp tcpip.MonotonicTime
 }
@@ -115,8 +119,10 @@ func (r *InstalledRoute) SetLastUsedTimestamp(monotonicTime tcpip.MonotonicTime)
 // A route is in the pending state if an installed route does not yet exist
 // for the entry. For such routes, packets are added to an expiring queue until
 // a route is installed.
+//
+// +stateify savable
 type PendingRoute struct {
-	packets []stack.PacketBufferPtr
+	packets []*stack.PacketBuffer
 
 	// expiration is the timestamp at which the pending route should be expired.
 	//
@@ -159,6 +165,8 @@ const (
 )
 
 // Config represents the options for configuring a RouteTable.
+//
+// +stateify savable
 type Config struct {
 	// MaxPendingQueueSize corresponds to the maximum number of queued packets
 	// for a pending route.
@@ -265,7 +273,7 @@ func (r *RouteTable) cleanupPendingRoutes() {
 
 func (r *RouteTable) newPendingRoute() PendingRoute {
 	return PendingRoute{
-		packets:    make([]stack.PacketBufferPtr, 0, r.config.MaxPendingQueueSize),
+		packets:    make([]*stack.PacketBuffer, 0, r.config.MaxPendingQueueSize),
 		expiration: r.config.Clock.NowMonotonic().Add(DefaultPendingRouteExpiration),
 	}
 }
@@ -326,7 +334,7 @@ func (e GetRouteResultState) String() string {
 //
 // If the relevant pending route queue is at max capacity, then returns false.
 // Otherwise, returns true.
-func (r *RouteTable) GetRouteOrInsertPending(key stack.UnicastSourceAndMulticastDestination, pkt stack.PacketBufferPtr) (GetRouteResult, bool) {
+func (r *RouteTable) GetRouteOrInsertPending(key stack.UnicastSourceAndMulticastDestination, pkt *stack.PacketBuffer) (GetRouteResult, bool) {
 	r.installedMu.RLock()
 	defer r.installedMu.RUnlock()
 
@@ -374,7 +382,7 @@ func (r *RouteTable) getOrCreatePendingRouteRLocked(key stack.UnicastSourceAndMu
 // returned. The caller assumes ownership of these packets and is responsible
 // for forwarding and releasing them. If an installed route already exists for
 // the provided key, then it is overwritten.
-func (r *RouteTable) AddInstalledRoute(key stack.UnicastSourceAndMulticastDestination, route *InstalledRoute) []stack.PacketBufferPtr {
+func (r *RouteTable) AddInstalledRoute(key stack.UnicastSourceAndMulticastDestination, route *InstalledRoute) []*stack.PacketBuffer {
 	r.installedMu.Lock()
 	defer r.installedMu.Unlock()
 	r.installedRoutes[key] = route
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go
index 875eca47..8e96ca80 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go
@@ -232,8 +232,7 @@ func (e *endpoint) checkLocalAddress(addr tcpip.Address) bool {
 		return true
 	}
 
-	if addressEndpoint := e.AcquireAssignedAddress(addr, false, stack.NeverPrimaryEndpoint); addressEndpoint != nil {
-		addressEndpoint.DecRef()
+	if addressEndpoint := e.AcquireAssignedAddress(addr, false, stack.NeverPrimaryEndpoint, true /* readOnly */); addressEndpoint != nil {
 		return true
 	}
 	return false
@@ -243,7 +242,7 @@ func (e *endpoint) checkLocalAddress(addr tcpip.Address) bool {
 // of the original packet that caused the ICMP one to be sent. This information
 // is used to find out which transport endpoint must be notified about the ICMP
 // packet. We only expect the payload, not the enclosing ICMP packet.
-func (e *endpoint) handleControl(errInfo stack.TransportError, pkt stack.PacketBufferPtr) {
+func (e *endpoint) handleControl(errInfo stack.TransportError, pkt *stack.PacketBuffer) {
 	h, ok := pkt.Data().PullUp(header.IPv4MinimumSize)
 	if !ok {
 		return
@@ -280,7 +279,7 @@ func (e *endpoint) handleControl(errInfo stack.TransportError, pkt stack.PacketB
 	e.dispatcher.DeliverTransportError(srcAddr, dstAddr, ProtocolNumber, p, errInfo, pkt)
 }
 
-func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr) {
+func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {
 	received := e.stats.icmp.packetsReceived
 	h := header.ICMPv4(pkt.TransportHeader().Slice())
 	if len(h) < header.ICMPv4MinimumSize {
@@ -607,7 +606,7 @@ func (*icmpReasonHostUnreachable) isICMPReason() {}
 // the problematic packet. It incorporates as much of that packet as
 // possible as well as any error metadata as is available. returnError
 // expects pkt to hold a valid IPv4 packet as per the wire format.
-func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, deliveredLocally bool) tcpip.Error {
+func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer, deliveredLocally bool) tcpip.Error {
 	origIPHdr := header.IPv4(pkt.NetworkHeader().Slice())
 	origIPHdrSrc := origIPHdr.SourceAddress()
 	origIPHdrDst := origIPHdr.DestinationAddress()
@@ -807,7 +806,7 @@ func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, del
 }
 
 // OnReassemblyTimeout implements fragmentation.TimeoutHandler.
-func (p *protocol) OnReassemblyTimeout(pkt stack.PacketBufferPtr) {
+func (p *protocol) OnReassemblyTimeout(pkt *stack.PacketBuffer) {
 	// OnReassemblyTimeout sends a Time Exceeded Message, as per RFC 792:
 	//
 	//   If a host reassembling a fragmented datagram cannot complete the
@@ -816,7 +815,7 @@ func (p *protocol) OnReassemblyTimeout(pkt stack.PacketBufferPtr) {
 	//
 	//   If fragment zero is not available then no time exceeded need be sent at
 	//   all.
-	if !pkt.IsNil() {
+	if pkt != nil {
 		p.returnError(&icmpReasonReassemblyTimeout{}, pkt, true /* deliveredLocally */)
 	}
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go
index 6db1cf17..b7a3ce29 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go
@@ -90,6 +90,8 @@ type IGMPEndpoint interface {
 }
 
 // IGMPOptions holds options for IGMP.
+//
+// +stateify savable
 type IGMPOptions struct {
 	// Enabled indicates whether IGMP will be performed.
 	//
@@ -107,6 +109,8 @@ var _ ip.MulticastGroupProtocol = (*igmpState)(nil)
 // igmpState is the per-interface IGMP state.
 //
 // igmpState.init() MUST be called after creating an IGMP state.
+//
+// +stateify savable
 type igmpState struct {
 	// The IPv4 endpoint this igmpState is for.
 	ep *endpoint
@@ -283,7 +287,7 @@ func (*igmpState) V2QueryMaxRespCodeToV1Delay(code uint16) time.Duration {
 func (igmp *igmpState) init(ep *endpoint) {
 	igmp.ep = ep
 	igmp.genericMulticastProtocol.Init(&ep.mu, ip.GenericMulticastProtocolOptions{
-		Rand:                      ep.protocol.stack.Rand(),
+		Rand:                      ep.protocol.stack.InsecureRNG(),
 		Clock:                     ep.protocol.stack.Clock(),
 		Protocol:                  igmp,
 		MaxUnsolicitedReportDelay: UnsolicitedReportIntervalMax,
@@ -328,7 +332,7 @@ func (igmp *igmpState) isSourceIPValidLocked(src tcpip.Address, messageType head
 }
 
 // +checklocks:igmp.ep.mu
-func (igmp *igmpState) isPacketValidLocked(pkt stack.PacketBufferPtr, messageType header.IGMPType, hasRouterAlertOption bool) bool {
+func (igmp *igmpState) isPacketValidLocked(pkt *stack.PacketBuffer, messageType header.IGMPType, hasRouterAlertOption bool) bool {
 	// We can safely assume that the IP header is valid if we got this far.
 	iph := header.IPv4(pkt.NetworkHeader().Slice())
 
@@ -346,7 +350,7 @@ func (igmp *igmpState) isPacketValidLocked(pkt stack.PacketBufferPtr, messageTyp
 // handleIGMP handles an IGMP packet.
 //
 // +checklocks:igmp.ep.mu
-func (igmp *igmpState) handleIGMP(pkt stack.PacketBufferPtr, hasRouterAlertOption bool) {
+func (igmp *igmpState) handleIGMP(pkt *stack.PacketBuffer, hasRouterAlertOption bool) {
 	received := igmp.ep.stats.igmp.packetsReceived
 	hdr, ok := pkt.Data().PullUp(pkt.Data().Size())
 	if !ok {
@@ -521,7 +525,7 @@ func (igmp *igmpState) writePacketInner(buf *buffer.View, reportStat tcpip.Multi
 	})
 	defer pkt.DecRef()
 
-	addressEndpoint := igmp.ep.acquireOutgoingPrimaryAddressRLocked(destAddress, false /* allowExpired */)
+	addressEndpoint := igmp.ep.acquireOutgoingPrimaryAddressRLocked(destAddress, tcpip.Address{} /* srcHint */, false /* allowExpired */)
 	if addressEndpoint == nil {
 		return false, nil
 	}
@@ -586,7 +590,7 @@ func (igmp *igmpState) softLeaveAll() {
 	igmp.genericMulticastProtocol.MakeAllNonMemberLocked()
 }
 
-// initializeAll attemps to initialize the IGMP state for each group that has
+// initializeAll attempts to initialize the IGMP state for each group that has
 // been joined locally.
 //
 // +checklocks:igmp.ep.mu
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go
index 2e5ab026..e2721a4d 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go
@@ -79,6 +79,7 @@ var _ stack.AddressableEndpoint = (*endpoint)(nil)
 var _ stack.NetworkEndpoint = (*endpoint)(nil)
 var _ IGMPEndpoint = (*endpoint)(nil)
 
+// +stateify savable
 type endpoint struct {
 	nic        stack.NetworkInterface
 	dispatcher stack.TransportDispatcher
@@ -95,13 +96,10 @@ type endpoint struct {
 
 	// multicastForwarding is set to forwardingEnabled when the endpoint has
 	// forwarding enabled and forwardingDisabled when it is disabled.
-	//
-	// TODO(https://gvisor.dev/issue/7338): Implement support for multicast
-	//forwarding. Currently, setting this value to true is a no-op.
 	multicastForwarding atomicbitops.Uint32
 
 	// mu protects below.
-	mu sync.RWMutex
+	mu sync.RWMutex `state:"nosave"`
 
 	// +checklocks:mu
 	addressableEndpointState stack.AddressableEndpointState
@@ -137,7 +135,7 @@ func (e *endpoint) getIGMPVersionLocked() IGMPVersion {
 }
 
 // HandleLinkResolutionFailure implements stack.LinkResolvableNetworkEndpoint.
-func (e *endpoint) HandleLinkResolutionFailure(pkt stack.PacketBufferPtr) {
+func (e *endpoint) HandleLinkResolutionFailure(pkt *stack.PacketBuffer) {
 	// If we are operating as a router, return an ICMP error to the original
 	// packet's sender.
 	if pkt.NetworkPacketInfo.IsForwardedPacket {
@@ -193,8 +191,7 @@ func (p *protocol) findEndpointWithAddress(addr tcpip.Address) *endpoint {
 	defer p.mu.RUnlock()
 
 	for _, e := range p.eps {
-		if addressEndpoint := e.AcquireAssignedAddress(addr, false /* allowTemp */, stack.NeverPrimaryEndpoint); addressEndpoint != nil {
-			addressEndpoint.DecRef()
+		if addressEndpoint := e.AcquireAssignedAddress(addr, false /* allowTemp */, stack.NeverPrimaryEndpoint, true /* readOnly */); addressEndpoint != nil {
 			return e
 		}
 	}
@@ -437,7 +434,18 @@ func (e *endpoint) NetworkProtocolNumber() tcpip.NetworkProtocolNumber {
 	return e.protocol.Number()
 }
 
-func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketBufferPtr, params stack.NetworkHeaderParams, options header.IPv4OptionsSerializer) tcpip.Error {
+// getID returns a random uint16 number (other than zero) to be used as ID in
+// the IPv4 header.
+func (e *endpoint) getID() uint16 {
+	rng := e.protocol.stack.SecureRNG()
+	id := rng.Uint16()
+	for id == 0 {
+		id = rng.Uint16()
+	}
+	return id
+}
+
+func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams, options header.IPv4OptionsSerializer) tcpip.Error {
 	hdrLen := header.IPv4MinimumSize
 	var optLen int
 	if options != nil {
@@ -452,20 +460,26 @@ func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketB
 	if length > math.MaxUint16 {
 		return &tcpip.ErrMessageTooLong{}
 	}
-	// RFC 6864 section 4.3 mandates uniqueness of ID values for non-atomic
-	// datagrams. Since the DF bit is never being set here, all datagrams
-	// are non-atomic and need an ID.
-	id := e.protocol.ids[hashRoute(srcAddr, dstAddr, params.Protocol, e.protocol.hashIV)%buckets].Add(1)
-	ipH.Encode(&header.IPv4Fields{
+
+	fields := header.IPv4Fields{
 		TotalLength: uint16(length),
-		ID:          uint16(id),
 		TTL:         params.TTL,
 		TOS:         params.TOS,
 		Protocol:    uint8(params.Protocol),
 		SrcAddr:     srcAddr,
 		DstAddr:     dstAddr,
 		Options:     options,
-	})
+	}
+	if params.DF {
+		// Treat want and do the same.
+		fields.Flags = header.IPv4FlagDontFragment
+	} else {
+		// RFC 6864 section 4.3 mandates uniqueness of ID values for
+		// non-atomic datagrams.
+		fields.ID = e.getID()
+	}
+	ipH.Encode(&fields)
+
 	ipH.SetChecksum(^ipH.CalculateChecksum())
 	pkt.NetworkProtocolNumber = ProtocolNumber
 	return nil
@@ -475,7 +489,7 @@ func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketB
 // fragment. It returns the number of fragments handled and the number of
 // fragments left to be processed. The IP header must already be present in the
 // original packet.
-func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt stack.PacketBufferPtr, handler func(stack.PacketBufferPtr) tcpip.Error) (int, int, tcpip.Error) {
+func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt *stack.PacketBuffer, handler func(*stack.PacketBuffer) tcpip.Error) (int, int, tcpip.Error) {
 	// Round the MTU down to align to 8 bytes.
 	fragmentPayloadSize := networkMTU &^ 7
 	networkHeader := header.IPv4(pkt.NetworkHeader().Slice())
@@ -498,7 +512,7 @@ func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt stack.
 }
 
 // WritePacket writes a packet to the given destination address and protocol.
-func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt stack.PacketBufferPtr) tcpip.Error {
+func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt *stack.PacketBuffer) tcpip.Error {
 	if err := e.addIPHeader(r.LocalAddress(), r.RemoteAddress(), pkt, params, nil /* options */); err != nil {
 		return err
 	}
@@ -506,7 +520,7 @@ func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams,
 	return e.writePacket(r, pkt)
 }
 
-func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip.Error {
+func (e *endpoint) writePacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error {
 	netHeader := header.IPv4(pkt.NetworkHeader().Slice())
 	dstAddr := netHeader.DestinationAddress()
 
@@ -538,7 +552,7 @@ func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip.
 	return e.writePacketPostRouting(r, pkt, false /* headerIncluded */)
 }
 
-func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt stack.PacketBufferPtr, headerIncluded bool) tcpip.Error {
+func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt *stack.PacketBuffer, headerIncluded bool) tcpip.Error {
 	if r.Loop()&stack.PacketLoop != 0 {
 		// If the packet was generated by the stack (not a raw/packet endpoint
 		// where a packet may be written with the header included), then we can
@@ -573,7 +587,7 @@ func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt stack.PacketBuffer
 			// is set but the packet must be fragmented for the non-forwarding case.
 			return &tcpip.ErrMessageTooLong{}
 		}
-		sent, remain, err := e.handleFragments(r, networkMTU, pkt, func(fragPkt stack.PacketBufferPtr) tcpip.Error {
+		sent, remain, err := e.handleFragments(r, networkMTU, pkt, func(fragPkt *stack.PacketBuffer) tcpip.Error {
 			// TODO(gvisor.dev/issue/3884): Evaluate whether we want to send each
 			// fragment one by one using WritePacket() (current strategy) or if we
 			// want to create a PacketBufferList from the fragments and feed it to
@@ -594,7 +608,7 @@ func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt stack.PacketBuffer
 }
 
 // WriteHeaderIncludedPacket implements stack.NetworkEndpoint.
-func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip.Error {
+func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error {
 	// The packet already has an IP header, but there are a few required
 	// checks.
 	h, ok := pkt.Data().PullUp(header.IPv4MinimumSize)
@@ -628,7 +642,7 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBuf
 		// non-atomic datagrams, so assign an ID to all such datagrams
 		// according to the definition given in RFC 6864 section 4.
 		if ipH.Flags()&header.IPv4FlagDontFragment == 0 || ipH.Flags()&header.IPv4FlagMoreFragments != 0 || ipH.FragmentOffset() > 0 {
-			ipH.SetID(uint16(e.protocol.ids[hashRoute(r.LocalAddress(), r.RemoteAddress(), 0 /* protocol */, e.protocol.hashIV)%buckets].Add(1)))
+			ipH.SetID(e.getID())
 		}
 	}
 
@@ -656,7 +670,7 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBuf
 // updating the options.
 //
 // This method should be invoked by the endpoint that received the pkt.
-func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBufferPtr, updateOptions bool) ip.ForwardingError {
+func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt *stack.PacketBuffer, updateOptions bool) ip.ForwardingError {
 	h := header.IPv4(pkt.NetworkHeader().Slice())
 	stk := e.protocol.stack
 
@@ -726,7 +740,7 @@ func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBu
 }
 
 // forwardUnicastPacket attempts to forward a packet to its final destination.
-func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.ForwardingError {
+func (e *endpoint) forwardUnicastPacket(pkt *stack.PacketBuffer) ip.ForwardingError {
 	hView := pkt.NetworkHeader().View()
 	defer hView.Release()
 	h := header.IPv4(hView.AsSlice())
@@ -804,7 +818,7 @@ func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.Forwarding
 
 // HandlePacket is called by the link layer when new ipv4 packets arrive for
 // this endpoint.
-func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) {
+func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {
 	stats := e.stats.ip
 
 	stats.PacketsReceived.Increment()
@@ -836,10 +850,8 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) {
 		}
 
 		if e.protocol.stack.HandleLocal() {
-			addressEndpoint := e.AcquireAssignedAddress(header.IPv4(pkt.NetworkHeader().Slice()).SourceAddress(), e.nic.Promiscuous(), stack.CanBePrimaryEndpoint)
+			addressEndpoint := e.AcquireAssignedAddress(header.IPv4(pkt.NetworkHeader().Slice()).SourceAddress(), e.nic.Promiscuous(), stack.CanBePrimaryEndpoint, true /* readOnly */)
 			if addressEndpoint != nil {
-				addressEndpoint.DecRef()
-
 				// The source address is one of our own, so we never should have gotten
 				// a packet like this unless HandleLocal is false or our NIC is the
 				// loopback interface.
@@ -863,7 +875,7 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) {
 // handleLocalPacket is like HandlePacket except it does not perform the
 // prerouting iptables hook or check for loopback traffic that originated from
 // outside of the netstack (i.e. martian loopback packets).
-func (e *endpoint) handleLocalPacket(pkt stack.PacketBufferPtr, canSkipRXChecksum bool) {
+func (e *endpoint) handleLocalPacket(pkt *stack.PacketBuffer, canSkipRXChecksum bool) {
 	stats := e.stats.ip
 	stats.PacketsReceived.Increment()
 
@@ -935,7 +947,7 @@ func validateAddressesForForwarding(h header.IPv4) ip.ForwardingError {
 //
 // This method should be invoked for incoming multicast packets using the
 // endpoint that received the packet.
-func (e *endpoint) forwardMulticastPacket(h header.IPv4, pkt stack.PacketBufferPtr) ip.ForwardingError {
+func (e *endpoint) forwardMulticastPacket(h header.IPv4, pkt *stack.PacketBuffer) ip.ForwardingError {
 	if err := validateAddressesForForwarding(h); err != nil {
 		return err
 	}
@@ -988,7 +1000,7 @@ func (e *endpoint) forwardMulticastPacket(h header.IPv4, pkt stack.PacketBufferP
 	return &ip.ErrHostUnreachable{}
 }
 
-func (e *endpoint) updateOptionsForForwarding(pkt stack.PacketBufferPtr) ip.ForwardingError {
+func (e *endpoint) updateOptionsForForwarding(pkt *stack.PacketBuffer) ip.ForwardingError {
 	h := header.IPv4(pkt.NetworkHeader().Slice())
 	if opts := h.Options(); len(opts) != 0 {
 		newOpts, _, optProblem := e.processIPOptions(pkt, opts, &optionUsageForward{})
@@ -1023,7 +1035,7 @@ func (e *endpoint) updateOptionsForForwarding(pkt stack.PacketBufferPtr) ip.Forw
 // provided installedRoute.
 //
 // This method should be invoked by the endpoint that received the pkt.
-func (e *endpoint) forwardValidatedMulticastPacket(pkt stack.PacketBufferPtr, installedRoute *multicast.InstalledRoute) ip.ForwardingError {
+func (e *endpoint) forwardValidatedMulticastPacket(pkt *stack.PacketBuffer, installedRoute *multicast.InstalledRoute) ip.ForwardingError {
 	// Per RFC 1812 section 5.2.1.3,
 	//
 	//	 Based on the IP source and destination addresses found in the datagram
@@ -1056,7 +1068,7 @@ func (e *endpoint) forwardValidatedMulticastPacket(pkt stack.PacketBufferPtr, in
 // of the provided outgoingInterface.
 //
 // This method should be invoked by the endpoint that received the pkt.
-func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt stack.PacketBufferPtr, outgoingInterface stack.MulticastRouteOutgoingInterface) ip.ForwardingError {
+func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt *stack.PacketBuffer, outgoingInterface stack.MulticastRouteOutgoingInterface) ip.ForwardingError {
 	h := header.IPv4(pkt.NetworkHeader().Slice())
 
 	// Per RFC 1812 section 5.2.1.3,
@@ -1083,7 +1095,7 @@ func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt stack.PacketBu
 	return e.forwardPacketWithRoute(route, pkt, true /* updateOptions */)
 }
 
-func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt stack.PacketBufferPtr, inNICName string) {
+func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer, inNICName string) {
 	pkt.NICID = e.nic.ID()
 
 	// Raw socket packets are delivered based solely on the transport protocol
@@ -1108,9 +1120,8 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt stack.PacketBufferPt
 		return
 	}
 	// Make sure the source address is not a subnet-local broadcast address.
-	if addressEndpoint := e.AcquireAssignedAddress(srcAddr, false /* createTemp */, stack.NeverPrimaryEndpoint); addressEndpoint != nil {
+	if addressEndpoint := e.AcquireAssignedAddress(srcAddr, false /* createTemp */, stack.NeverPrimaryEndpoint, true /* readOnly */); addressEndpoint != nil {
 		subnet := addressEndpoint.Subnet()
-		addressEndpoint.DecRef()
 		if subnet.IsBroadcast(srcAddr) {
 			stats.ip.InvalidSourceAddressesReceived.Increment()
 			return
@@ -1147,9 +1158,8 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt stack.PacketBufferPt
 	//
 	// If the packet is destined for this device, then it should be delivered
 	// locally. Otherwise, if forwarding is enabled, it should be forwarded.
-	if addressEndpoint := e.AcquireAssignedAddress(dstAddr, e.nic.Promiscuous(), stack.CanBePrimaryEndpoint); addressEndpoint != nil {
+	if addressEndpoint := e.AcquireAssignedAddress(dstAddr, e.nic.Promiscuous(), stack.CanBePrimaryEndpoint, true /* readOnly */); addressEndpoint != nil {
 		subnet := addressEndpoint.AddressWithPrefix().Subnet()
-		addressEndpoint.DecRef()
 		pkt.NetworkPacketInfo.LocalAddressBroadcast = subnet.IsBroadcast(dstAddr) || dstAddr == header.IPv4Broadcast
 		e.deliverPacketLocally(h, pkt, inNICName)
 	} else if e.Forwarding() {
@@ -1194,7 +1204,7 @@ func (e *endpoint) handleForwardingError(err ip.ForwardingError) {
 	stats.Forwarding.Errors.Increment()
 }
 
-func (e *endpoint) deliverPacketLocally(h header.IPv4, pkt stack.PacketBufferPtr, inNICName string) {
+func (e *endpoint) deliverPacketLocally(h header.IPv4, pkt *stack.PacketBuffer, inNICName string) {
 	stats := e.stats
 	// iptables filtering. All packets that reach here are intended for
 	// this machine and will not be forwarded.
@@ -1352,8 +1362,8 @@ func (e *endpoint) Close() {
 
 // AddAndAcquirePermanentAddress implements stack.AddressableEndpoint.
 func (e *endpoint) AddAndAcquirePermanentAddress(addr tcpip.AddressWithPrefix, properties stack.AddressProperties) (stack.AddressEndpoint, tcpip.Error) {
-	e.mu.RLock()
-	defer e.mu.RUnlock()
+	e.mu.Lock()
+	defer e.mu.Unlock()
 
 	ep, err := e.addressableEndpointState.AddAndAcquireAddress(addr, properties, stack.Permanent)
 	if err == nil {
@@ -1364,7 +1374,7 @@ func (e *endpoint) AddAndAcquirePermanentAddress(addr tcpip.AddressWithPrefix, p
 
 // sendQueuedReports sends queued igmp reports.
 //
-// +checklocksread:e.mu
+// +checklocks:e.mu
 // +checklocksalias:e.igmp.ep.mu=e.mu
 func (e *endpoint) sendQueuedReports() {
 	e.igmp.sendQueuedReports()
@@ -1399,7 +1409,7 @@ func (e *endpoint) MainAddress() tcpip.AddressWithPrefix {
 }
 
 // AcquireAssignedAddress implements stack.AddressableEndpoint.
-func (e *endpoint) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB stack.PrimaryEndpointBehavior) stack.AddressEndpoint {
+func (e *endpoint) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB stack.PrimaryEndpointBehavior, readOnly bool) stack.AddressEndpoint {
 	e.mu.RLock()
 	defer e.mu.RUnlock()
 
@@ -1409,22 +1419,22 @@ func (e *endpoint) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp boo
 		// IPv4 has a notion of a subnet broadcast address and considers the
 		// loopback interface bound to an address's whole subnet (on linux).
 		return subnet.IsBroadcast(localAddr) || (loopback && subnet.Contains(localAddr))
-	}, allowTemp, tempPEB)
+	}, allowTemp, tempPEB, readOnly)
 }
 
 // AcquireOutgoingPrimaryAddress implements stack.AddressableEndpoint.
-func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {
+func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr, srcHint tcpip.Address, allowExpired bool) stack.AddressEndpoint {
 	e.mu.RLock()
 	defer e.mu.RUnlock()
-	return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, allowExpired)
+	return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, srcHint, allowExpired)
 }
 
 // acquireOutgoingPrimaryAddressRLocked is like AcquireOutgoingPrimaryAddress
 // but with locking requirements
 //
 // +checklocksread:e.mu
-func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {
-	return e.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, allowExpired)
+func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr, srcHint tcpip.Address, allowExpired bool) stack.AddressEndpoint {
+	return e.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, srcHint, allowExpired)
 }
 
 // PrimaryAddresses implements stack.AddressableEndpoint.
@@ -1493,11 +1503,12 @@ var _ stack.MulticastForwardingNetworkProtocol = (*protocol)(nil)
 var _ stack.RejectIPv4WithHandler = (*protocol)(nil)
 var _ fragmentation.TimeoutHandler = (*protocol)(nil)
 
+// +stateify savable
 type protocol struct {
 	stack *stack.Stack
 
 	// mu protects annotated fields below.
-	mu sync.RWMutex
+	mu sync.RWMutex `state:"nosave"`
 
 	// eps is keyed by NICID to allow protocol methods to retrieve an endpoint
 	// when handling a packet, by looking at which NIC handled the packet.
@@ -1514,6 +1525,8 @@ type protocol struct {
 
 	ids    []atomicbitops.Uint32
 	hashIV uint32
+	// idTS is the unix timestamp in milliseconds 'ids' was last accessed.
+	idTS atomicbitops.Int64
 
 	fragmentation *fragmentation.Fragmentation
 
@@ -1704,7 +1717,7 @@ func (p *protocol) MulticastRouteLastUsedTime(addresses stack.UnicastSourceAndMu
 	return timestamp, nil
 }
 
-func (p *protocol) forwardPendingMulticastPacket(pkt stack.PacketBufferPtr, installedRoute *multicast.InstalledRoute) {
+func (p *protocol) forwardPendingMulticastPacket(pkt *stack.PacketBuffer, installedRoute *multicast.InstalledRoute) {
 	defer pkt.DecRef()
 
 	// Attempt to forward the packet using the endpoint that it originally
@@ -1746,9 +1759,8 @@ func (p *protocol) isSubnetLocalBroadcastAddress(addr tcpip.Address) bool {
 	defer p.mu.RUnlock()
 
 	for _, e := range p.eps {
-		if addressEndpoint := e.AcquireAssignedAddress(addr, false /* createTemp */, stack.NeverPrimaryEndpoint); addressEndpoint != nil {
+		if addressEndpoint := e.AcquireAssignedAddress(addr, false /* createTemp */, stack.NeverPrimaryEndpoint, true /* readOnly */); addressEndpoint != nil {
 			subnet := addressEndpoint.Subnet()
-			addressEndpoint.DecRef()
 			if subnet.IsBroadcast(addr) {
 				return true
 			}
@@ -1761,7 +1773,7 @@ func (p *protocol) isSubnetLocalBroadcastAddress(addr tcpip.Address) bool {
 // returns the parsed IP header.
 //
 // Returns true if the IP header was successfully parsed.
-func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bool) {
+func (p *protocol) parseAndValidate(pkt *stack.PacketBuffer) (*buffer.View, bool) {
 	transProtoNum, hasTransportHdr, ok := p.Parse(pkt)
 	if !ok {
 		return nil, false
@@ -1785,7 +1797,7 @@ func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bo
 	return pkt.NetworkHeader().View(), true
 }
 
-func (p *protocol) parseTransport(pkt stack.PacketBufferPtr, transProtoNum tcpip.TransportProtocolNumber) {
+func (p *protocol) parseTransport(pkt *stack.PacketBuffer, transProtoNum tcpip.TransportProtocolNumber) {
 	if transProtoNum == header.ICMPv4ProtocolNumber {
 		// The transport layer will handle transport layer parsing errors.
 		_ = parse.ICMPv4(pkt)
@@ -1803,7 +1815,7 @@ func (p *protocol) parseTransport(pkt stack.PacketBufferPtr, transProtoNum tcpip
 }
 
 // Parse implements stack.NetworkProtocol.
-func (*protocol) Parse(pkt stack.PacketBufferPtr) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) {
+func (*protocol) Parse(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) {
 	if ok := parse.IPv4(pkt); !ok {
 		return 0, false, false
 	}
@@ -1830,7 +1842,7 @@ func (p *protocol) allowICMPReply(icmpType header.ICMPv4Type, code header.ICMPv4
 }
 
 // SendRejectionError implements stack.RejectIPv4WithHandler.
-func (p *protocol) SendRejectionError(pkt stack.PacketBufferPtr, rejectWith stack.RejectIPv4WithICMPType, inputHook bool) tcpip.Error {
+func (p *protocol) SendRejectionError(pkt *stack.PacketBuffer, rejectWith stack.RejectIPv4WithICMPType, inputHook bool) tcpip.Error {
 	switch rejectWith {
 	case stack.RejectIPv4WithICMPNetUnreachable:
 		return p.returnError(&icmpReasonNetworkUnreachable{}, pkt, inputHook)
@@ -1872,7 +1884,7 @@ func calculateNetworkMTU(linkMTU, networkHeaderSize uint32) (uint32, tcpip.Error
 	return networkMTU - networkHeaderSize, nil
 }
 
-func packetMustBeFragmented(pkt stack.PacketBufferPtr, networkMTU uint32) bool {
+func packetMustBeFragmented(pkt *stack.PacketBuffer, networkMTU uint32) bool {
 	payload := len(pkt.TransportHeader().Slice()) + pkt.Data().Size()
 	return pkt.GSOOptions.Type == stack.GSONone && uint32(payload) > networkMTU
 }
@@ -1899,6 +1911,8 @@ func hashRoute(srcAddr, dstAddr tcpip.Address, protocol tcpip.TransportProtocolN
 }
 
 // Options holds options to configure a new protocol.
+//
+// +stateify savable
 type Options struct {
 	// IGMP holds options for IGMP.
 	IGMP IGMPOptions
@@ -1949,7 +1963,7 @@ func NewProtocol(s *stack.Stack) stack.NetworkProtocol {
 	return NewProtocolWithOptions(Options{})(s)
 }
 
-func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeader header.IPv4) (stack.PacketBufferPtr, bool) {
+func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeader header.IPv4) (*stack.PacketBuffer, bool) {
 	fragPkt, offset, copied, more := pf.BuildNextFragment()
 	fragPkt.NetworkProtocolNumber = ProtocolNumber
 
@@ -2290,7 +2304,7 @@ type optionTracker struct {
 //
 // If there were no errors during parsing, the new set of options is returned as
 // a new buffer.
-func (e *endpoint) processIPOptions(pkt stack.PacketBufferPtr, opts header.IPv4Options, usage optionsUsage) (header.IPv4Options, optionTracker, *header.IPv4OptParameterProblem) {
+func (e *endpoint) processIPOptions(pkt *stack.PacketBuffer, opts header.IPv4Options, usage optionsUsage) (header.IPv4Options, optionTracker, *header.IPv4OptParameterProblem) {
 	stats := e.stats.ip
 	optIter := opts.MakeIterator()
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go
index d538eecb..88e13bf6 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go
@@ -3,6 +3,8 @@
 package ipv4
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -21,10 +23,10 @@ func (i *icmpv4DestinationUnreachableSockError) StateSave(stateSinkObject state.
 	i.beforeSave()
 }
 
-func (i *icmpv4DestinationUnreachableSockError) afterLoad() {}
+func (i *icmpv4DestinationUnreachableSockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *icmpv4DestinationUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+func (i *icmpv4DestinationUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (i *icmpv4DestinationHostUnreachableSockError) StateTypeName() string {
@@ -45,10 +47,10 @@ func (i *icmpv4DestinationHostUnreachableSockError) StateSave(stateSinkObject st
 	stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
-func (i *icmpv4DestinationHostUnreachableSockError) afterLoad() {}
+func (i *icmpv4DestinationHostUnreachableSockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *icmpv4DestinationHostUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+func (i *icmpv4DestinationHostUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
@@ -70,10 +72,10 @@ func (i *icmpv4DestinationNetUnreachableSockError) StateSave(stateSinkObject sta
 	stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
-func (i *icmpv4DestinationNetUnreachableSockError) afterLoad() {}
+func (i *icmpv4DestinationNetUnreachableSockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *icmpv4DestinationNetUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+func (i *icmpv4DestinationNetUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
@@ -95,10 +97,10 @@ func (i *icmpv4DestinationPortUnreachableSockError) StateSave(stateSinkObject st
 	stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
-func (i *icmpv4DestinationPortUnreachableSockError) afterLoad() {}
+func (i *icmpv4DestinationPortUnreachableSockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *icmpv4DestinationPortUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+func (i *icmpv4DestinationPortUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
@@ -120,10 +122,10 @@ func (i *icmpv4DestinationProtoUnreachableSockError) StateSave(stateSinkObject s
 	stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
-func (i *icmpv4DestinationProtoUnreachableSockError) afterLoad() {}
+func (i *icmpv4DestinationProtoUnreachableSockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *icmpv4DestinationProtoUnreachableSockError) StateLoad(stateSourceObject state.Source) {
+func (i *icmpv4DestinationProtoUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
@@ -145,10 +147,10 @@ func (i *icmpv4SourceRouteFailedSockError) StateSave(stateSinkObject state.Sink)
 	stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
-func (i *icmpv4SourceRouteFailedSockError) afterLoad() {}
+func (i *icmpv4SourceRouteFailedSockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *icmpv4SourceRouteFailedSockError) StateLoad(stateSourceObject state.Source) {
+func (i *icmpv4SourceRouteFailedSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
@@ -170,10 +172,10 @@ func (i *icmpv4SourceHostIsolatedSockError) StateSave(stateSinkObject state.Sink
 	stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
-func (i *icmpv4SourceHostIsolatedSockError) afterLoad() {}
+func (i *icmpv4SourceHostIsolatedSockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *icmpv4SourceHostIsolatedSockError) StateLoad(stateSourceObject state.Source) {
+func (i *icmpv4SourceHostIsolatedSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
@@ -195,10 +197,10 @@ func (i *icmpv4DestinationHostUnknownSockError) StateSave(stateSinkObject state.
 	stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
-func (i *icmpv4DestinationHostUnknownSockError) afterLoad() {}
+func (i *icmpv4DestinationHostUnknownSockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *icmpv4DestinationHostUnknownSockError) StateLoad(stateSourceObject state.Source) {
+func (i *icmpv4DestinationHostUnknownSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
 }
 
@@ -222,14 +224,539 @@ func (e *icmpv4FragmentationNeededSockError) StateSave(stateSinkObject state.Sin
 	stateSinkObject.Save(1, &e.mtu)
 }
 
-func (e *icmpv4FragmentationNeededSockError) afterLoad() {}
+func (e *icmpv4FragmentationNeededSockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *icmpv4FragmentationNeededSockError) StateLoad(stateSourceObject state.Source) {
+func (e *icmpv4FragmentationNeededSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.icmpv4DestinationUnreachableSockError)
 	stateSourceObject.Load(1, &e.mtu)
 }
 
+func (i *IGMPOptions) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.IGMPOptions"
+}
+
+func (i *IGMPOptions) StateFields() []string {
+	return []string{
+		"Enabled",
+	}
+}
+
+func (i *IGMPOptions) beforeSave() {}
+
+// +checklocksignore
+func (i *IGMPOptions) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.Enabled)
+}
+
+func (i *IGMPOptions) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *IGMPOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.Enabled)
+}
+
+func (igmp *igmpState) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.igmpState"
+}
+
+func (igmp *igmpState) StateFields() []string {
+	return []string{
+		"ep",
+		"genericMulticastProtocol",
+		"mode",
+		"igmpV1Job",
+	}
+}
+
+func (igmp *igmpState) beforeSave() {}
+
+// +checklocksignore
+func (igmp *igmpState) StateSave(stateSinkObject state.Sink) {
+	igmp.beforeSave()
+	stateSinkObject.Save(0, &igmp.ep)
+	stateSinkObject.Save(1, &igmp.genericMulticastProtocol)
+	stateSinkObject.Save(2, &igmp.mode)
+	stateSinkObject.Save(3, &igmp.igmpV1Job)
+}
+
+func (igmp *igmpState) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (igmp *igmpState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &igmp.ep)
+	stateSourceObject.Load(1, &igmp.genericMulticastProtocol)
+	stateSourceObject.Load(2, &igmp.mode)
+	stateSourceObject.Load(3, &igmp.igmpV1Job)
+}
+
+func (e *endpoint) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.endpoint"
+}
+
+func (e *endpoint) StateFields() []string {
+	return []string{
+		"nic",
+		"dispatcher",
+		"protocol",
+		"stats",
+		"enabled",
+		"forwarding",
+		"multicastForwarding",
+		"addressableEndpointState",
+		"igmp",
+	}
+}
+
+func (e *endpoint) beforeSave() {}
+
+// +checklocksignore
+func (e *endpoint) StateSave(stateSinkObject state.Sink) {
+	e.beforeSave()
+	stateSinkObject.Save(0, &e.nic)
+	stateSinkObject.Save(1, &e.dispatcher)
+	stateSinkObject.Save(2, &e.protocol)
+	stateSinkObject.Save(3, &e.stats)
+	stateSinkObject.Save(4, &e.enabled)
+	stateSinkObject.Save(5, &e.forwarding)
+	stateSinkObject.Save(6, &e.multicastForwarding)
+	stateSinkObject.Save(7, &e.addressableEndpointState)
+	stateSinkObject.Save(8, &e.igmp)
+}
+
+func (e *endpoint) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (e *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &e.nic)
+	stateSourceObject.Load(1, &e.dispatcher)
+	stateSourceObject.Load(2, &e.protocol)
+	stateSourceObject.Load(3, &e.stats)
+	stateSourceObject.Load(4, &e.enabled)
+	stateSourceObject.Load(5, &e.forwarding)
+	stateSourceObject.Load(6, &e.multicastForwarding)
+	stateSourceObject.Load(7, &e.addressableEndpointState)
+	stateSourceObject.Load(8, &e.igmp)
+}
+
+func (p *protocol) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.protocol"
+}
+
+func (p *protocol) StateFields() []string {
+	return []string{
+		"stack",
+		"eps",
+		"icmpRateLimitedTypes",
+		"defaultTTL",
+		"ids",
+		"hashIV",
+		"idTS",
+		"fragmentation",
+		"options",
+		"multicastRouteTable",
+		"multicastForwardingDisp",
+	}
+}
+
+func (p *protocol) beforeSave() {}
+
+// +checklocksignore
+func (p *protocol) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.stack)
+	stateSinkObject.Save(1, &p.eps)
+	stateSinkObject.Save(2, &p.icmpRateLimitedTypes)
+	stateSinkObject.Save(3, &p.defaultTTL)
+	stateSinkObject.Save(4, &p.ids)
+	stateSinkObject.Save(5, &p.hashIV)
+	stateSinkObject.Save(6, &p.idTS)
+	stateSinkObject.Save(7, &p.fragmentation)
+	stateSinkObject.Save(8, &p.options)
+	stateSinkObject.Save(9, &p.multicastRouteTable)
+	stateSinkObject.Save(10, &p.multicastForwardingDisp)
+}
+
+func (p *protocol) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *protocol) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.stack)
+	stateSourceObject.Load(1, &p.eps)
+	stateSourceObject.Load(2, &p.icmpRateLimitedTypes)
+	stateSourceObject.Load(3, &p.defaultTTL)
+	stateSourceObject.Load(4, &p.ids)
+	stateSourceObject.Load(5, &p.hashIV)
+	stateSourceObject.Load(6, &p.idTS)
+	stateSourceObject.Load(7, &p.fragmentation)
+	stateSourceObject.Load(8, &p.options)
+	stateSourceObject.Load(9, &p.multicastRouteTable)
+	stateSourceObject.Load(10, &p.multicastForwardingDisp)
+}
+
+func (o *Options) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.Options"
+}
+
+func (o *Options) StateFields() []string {
+	return []string{
+		"IGMP",
+		"AllowExternalLoopbackTraffic",
+	}
+}
+
+func (o *Options) beforeSave() {}
+
+// +checklocksignore
+func (o *Options) StateSave(stateSinkObject state.Sink) {
+	o.beforeSave()
+	stateSinkObject.Save(0, &o.IGMP)
+	stateSinkObject.Save(1, &o.AllowExternalLoopbackTraffic)
+}
+
+func (o *Options) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (o *Options) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &o.IGMP)
+	stateSourceObject.Load(1, &o.AllowExternalLoopbackTraffic)
+}
+
+func (s *Stats) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.Stats"
+}
+
+func (s *Stats) StateFields() []string {
+	return []string{
+		"IP",
+		"IGMP",
+		"ICMP",
+	}
+}
+
+func (s *Stats) beforeSave() {}
+
+// +checklocksignore
+func (s *Stats) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.IP)
+	stateSinkObject.Save(1, &s.IGMP)
+	stateSinkObject.Save(2, &s.ICMP)
+}
+
+func (s *Stats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *Stats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.IP)
+	stateSourceObject.Load(1, &s.IGMP)
+	stateSourceObject.Load(2, &s.ICMP)
+}
+
+func (s *sharedStats) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.sharedStats"
+}
+
+func (s *sharedStats) StateFields() []string {
+	return []string{
+		"localStats",
+		"ip",
+		"icmp",
+		"igmp",
+	}
+}
+
+func (s *sharedStats) beforeSave() {}
+
+// +checklocksignore
+func (s *sharedStats) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.localStats)
+	stateSinkObject.Save(1, &s.ip)
+	stateSinkObject.Save(2, &s.icmp)
+	stateSinkObject.Save(3, &s.igmp)
+}
+
+func (s *sharedStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *sharedStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.localStats)
+	stateSourceObject.Load(1, &s.ip)
+	stateSourceObject.Load(2, &s.icmp)
+	stateSourceObject.Load(3, &s.igmp)
+}
+
+func (m *multiCounterICMPv4PacketStats) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.multiCounterICMPv4PacketStats"
+}
+
+func (m *multiCounterICMPv4PacketStats) StateFields() []string {
+	return []string{
+		"echoRequest",
+		"echoReply",
+		"dstUnreachable",
+		"srcQuench",
+		"redirect",
+		"timeExceeded",
+		"paramProblem",
+		"timestamp",
+		"timestampReply",
+		"infoRequest",
+		"infoReply",
+	}
+}
+
+func (m *multiCounterICMPv4PacketStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterICMPv4PacketStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.echoRequest)
+	stateSinkObject.Save(1, &m.echoReply)
+	stateSinkObject.Save(2, &m.dstUnreachable)
+	stateSinkObject.Save(3, &m.srcQuench)
+	stateSinkObject.Save(4, &m.redirect)
+	stateSinkObject.Save(5, &m.timeExceeded)
+	stateSinkObject.Save(6, &m.paramProblem)
+	stateSinkObject.Save(7, &m.timestamp)
+	stateSinkObject.Save(8, &m.timestampReply)
+	stateSinkObject.Save(9, &m.infoRequest)
+	stateSinkObject.Save(10, &m.infoReply)
+}
+
+func (m *multiCounterICMPv4PacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterICMPv4PacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.echoRequest)
+	stateSourceObject.Load(1, &m.echoReply)
+	stateSourceObject.Load(2, &m.dstUnreachable)
+	stateSourceObject.Load(3, &m.srcQuench)
+	stateSourceObject.Load(4, &m.redirect)
+	stateSourceObject.Load(5, &m.timeExceeded)
+	stateSourceObject.Load(6, &m.paramProblem)
+	stateSourceObject.Load(7, &m.timestamp)
+	stateSourceObject.Load(8, &m.timestampReply)
+	stateSourceObject.Load(9, &m.infoRequest)
+	stateSourceObject.Load(10, &m.infoReply)
+}
+
+func (m *multiCounterICMPv4SentPacketStats) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.multiCounterICMPv4SentPacketStats"
+}
+
+func (m *multiCounterICMPv4SentPacketStats) StateFields() []string {
+	return []string{
+		"multiCounterICMPv4PacketStats",
+		"dropped",
+		"rateLimited",
+	}
+}
+
+func (m *multiCounterICMPv4SentPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterICMPv4SentPacketStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.multiCounterICMPv4PacketStats)
+	stateSinkObject.Save(1, &m.dropped)
+	stateSinkObject.Save(2, &m.rateLimited)
+}
+
+func (m *multiCounterICMPv4SentPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterICMPv4SentPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.multiCounterICMPv4PacketStats)
+	stateSourceObject.Load(1, &m.dropped)
+	stateSourceObject.Load(2, &m.rateLimited)
+}
+
+func (m *multiCounterICMPv4ReceivedPacketStats) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.multiCounterICMPv4ReceivedPacketStats"
+}
+
+func (m *multiCounterICMPv4ReceivedPacketStats) StateFields() []string {
+	return []string{
+		"multiCounterICMPv4PacketStats",
+		"invalid",
+	}
+}
+
+func (m *multiCounterICMPv4ReceivedPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterICMPv4ReceivedPacketStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.multiCounterICMPv4PacketStats)
+	stateSinkObject.Save(1, &m.invalid)
+}
+
+func (m *multiCounterICMPv4ReceivedPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterICMPv4ReceivedPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.multiCounterICMPv4PacketStats)
+	stateSourceObject.Load(1, &m.invalid)
+}
+
+func (m *multiCounterICMPv4Stats) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.multiCounterICMPv4Stats"
+}
+
+func (m *multiCounterICMPv4Stats) StateFields() []string {
+	return []string{
+		"packetsSent",
+		"packetsReceived",
+	}
+}
+
+func (m *multiCounterICMPv4Stats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterICMPv4Stats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.packetsSent)
+	stateSinkObject.Save(1, &m.packetsReceived)
+}
+
+func (m *multiCounterICMPv4Stats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterICMPv4Stats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.packetsSent)
+	stateSourceObject.Load(1, &m.packetsReceived)
+}
+
+func (m *multiCounterIGMPPacketStats) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.multiCounterIGMPPacketStats"
+}
+
+func (m *multiCounterIGMPPacketStats) StateFields() []string {
+	return []string{
+		"membershipQuery",
+		"v1MembershipReport",
+		"v2MembershipReport",
+		"v3MembershipReport",
+		"leaveGroup",
+	}
+}
+
+func (m *multiCounterIGMPPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterIGMPPacketStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.membershipQuery)
+	stateSinkObject.Save(1, &m.v1MembershipReport)
+	stateSinkObject.Save(2, &m.v2MembershipReport)
+	stateSinkObject.Save(3, &m.v3MembershipReport)
+	stateSinkObject.Save(4, &m.leaveGroup)
+}
+
+func (m *multiCounterIGMPPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterIGMPPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.membershipQuery)
+	stateSourceObject.Load(1, &m.v1MembershipReport)
+	stateSourceObject.Load(2, &m.v2MembershipReport)
+	stateSourceObject.Load(3, &m.v3MembershipReport)
+	stateSourceObject.Load(4, &m.leaveGroup)
+}
+
+func (m *multiCounterIGMPSentPacketStats) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.multiCounterIGMPSentPacketStats"
+}
+
+func (m *multiCounterIGMPSentPacketStats) StateFields() []string {
+	return []string{
+		"multiCounterIGMPPacketStats",
+		"dropped",
+	}
+}
+
+func (m *multiCounterIGMPSentPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterIGMPSentPacketStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.multiCounterIGMPPacketStats)
+	stateSinkObject.Save(1, &m.dropped)
+}
+
+func (m *multiCounterIGMPSentPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterIGMPSentPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.multiCounterIGMPPacketStats)
+	stateSourceObject.Load(1, &m.dropped)
+}
+
+func (m *multiCounterIGMPReceivedPacketStats) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.multiCounterIGMPReceivedPacketStats"
+}
+
+func (m *multiCounterIGMPReceivedPacketStats) StateFields() []string {
+	return []string{
+		"multiCounterIGMPPacketStats",
+		"invalid",
+		"checksumErrors",
+		"unrecognized",
+	}
+}
+
+func (m *multiCounterIGMPReceivedPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterIGMPReceivedPacketStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.multiCounterIGMPPacketStats)
+	stateSinkObject.Save(1, &m.invalid)
+	stateSinkObject.Save(2, &m.checksumErrors)
+	stateSinkObject.Save(3, &m.unrecognized)
+}
+
+func (m *multiCounterIGMPReceivedPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterIGMPReceivedPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.multiCounterIGMPPacketStats)
+	stateSourceObject.Load(1, &m.invalid)
+	stateSourceObject.Load(2, &m.checksumErrors)
+	stateSourceObject.Load(3, &m.unrecognized)
+}
+
+func (m *multiCounterIGMPStats) StateTypeName() string {
+	return "pkg/tcpip/network/ipv4.multiCounterIGMPStats"
+}
+
+func (m *multiCounterIGMPStats) StateFields() []string {
+	return []string{
+		"packetsSent",
+		"packetsReceived",
+	}
+}
+
+func (m *multiCounterIGMPStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterIGMPStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.packetsSent)
+	stateSinkObject.Save(1, &m.packetsReceived)
+}
+
+func (m *multiCounterIGMPStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterIGMPStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.packetsSent)
+	stateSourceObject.Load(1, &m.packetsReceived)
+}
+
 func init() {
 	state.Register((*icmpv4DestinationUnreachableSockError)(nil))
 	state.Register((*icmpv4DestinationHostUnreachableSockError)(nil))
@@ -240,4 +767,19 @@ func init() {
 	state.Register((*icmpv4SourceHostIsolatedSockError)(nil))
 	state.Register((*icmpv4DestinationHostUnknownSockError)(nil))
 	state.Register((*icmpv4FragmentationNeededSockError)(nil))
+	state.Register((*IGMPOptions)(nil))
+	state.Register((*igmpState)(nil))
+	state.Register((*endpoint)(nil))
+	state.Register((*protocol)(nil))
+	state.Register((*Options)(nil))
+	state.Register((*Stats)(nil))
+	state.Register((*sharedStats)(nil))
+	state.Register((*multiCounterICMPv4PacketStats)(nil))
+	state.Register((*multiCounterICMPv4SentPacketStats)(nil))
+	state.Register((*multiCounterICMPv4ReceivedPacketStats)(nil))
+	state.Register((*multiCounterICMPv4Stats)(nil))
+	state.Register((*multiCounterIGMPPacketStats)(nil))
+	state.Register((*multiCounterIGMPSentPacketStats)(nil))
+	state.Register((*multiCounterIGMPReceivedPacketStats)(nil))
+	state.Register((*multiCounterIGMPStats)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/stats.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/stats.go
index 9ebef99e..5b59ff5c 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/stats.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/stats.go
@@ -23,6 +23,8 @@ import (
 var _ stack.IPNetworkEndpointStats = (*Stats)(nil)
 
 // Stats holds statistics related to the IPv4 protocol family.
+//
+// +stateify savable
 type Stats struct {
 	// IP holds IPv4 statistics.
 	IP tcpip.IPStats
@@ -42,6 +44,7 @@ func (s *Stats) IPStats() *tcpip.IPStats {
 	return &s.IP
 }
 
+// +stateify savable
 type sharedStats struct {
 	localStats Stats
 	ip         ip.MultiCounterIPStats
@@ -51,6 +54,7 @@ type sharedStats struct {
 
 // LINT.IfChange(multiCounterICMPv4PacketStats)
 
+// +stateify savable
 type multiCounterICMPv4PacketStats struct {
 	echoRequest    tcpip.MultiCounterStat
 	echoReply      tcpip.MultiCounterStat
@@ -83,6 +87,7 @@ func (m *multiCounterICMPv4PacketStats) init(a, b *tcpip.ICMPv4PacketStats) {
 
 // LINT.IfChange(multiCounterICMPv4SentPacketStats)
 
+// +stateify savable
 type multiCounterICMPv4SentPacketStats struct {
 	multiCounterICMPv4PacketStats
 	dropped     tcpip.MultiCounterStat
@@ -99,6 +104,7 @@ func (m *multiCounterICMPv4SentPacketStats) init(a, b *tcpip.ICMPv4SentPacketSta
 
 // LINT.IfChange(multiCounterICMPv4ReceivedPacketStats)
 
+// +stateify savable
 type multiCounterICMPv4ReceivedPacketStats struct {
 	multiCounterICMPv4PacketStats
 	invalid tcpip.MultiCounterStat
@@ -113,6 +119,7 @@ func (m *multiCounterICMPv4ReceivedPacketStats) init(a, b *tcpip.ICMPv4ReceivedP
 
 // LINT.IfChange(multiCounterICMPv4Stats)
 
+// +stateify savable
 type multiCounterICMPv4Stats struct {
 	packetsSent     multiCounterICMPv4SentPacketStats
 	packetsReceived multiCounterICMPv4ReceivedPacketStats
@@ -127,6 +134,7 @@ func (m *multiCounterICMPv4Stats) init(a, b *tcpip.ICMPv4Stats) {
 
 // LINT.IfChange(multiCounterIGMPPacketStats)
 
+// +stateify savable
 type multiCounterIGMPPacketStats struct {
 	membershipQuery    tcpip.MultiCounterStat
 	v1MembershipReport tcpip.MultiCounterStat
@@ -147,6 +155,7 @@ func (m *multiCounterIGMPPacketStats) init(a, b *tcpip.IGMPPacketStats) {
 
 // LINT.IfChange(multiCounterIGMPSentPacketStats)
 
+// +stateify savable
 type multiCounterIGMPSentPacketStats struct {
 	multiCounterIGMPPacketStats
 	dropped tcpip.MultiCounterStat
@@ -161,6 +170,7 @@ func (m *multiCounterIGMPSentPacketStats) init(a, b *tcpip.IGMPSentPacketStats)
 
 // LINT.IfChange(multiCounterIGMPReceivedPacketStats)
 
+// +stateify savable
 type multiCounterIGMPReceivedPacketStats struct {
 	multiCounterIGMPPacketStats
 	invalid        tcpip.MultiCounterStat
@@ -179,6 +189,7 @@ func (m *multiCounterIGMPReceivedPacketStats) init(a, b *tcpip.IGMPReceivedPacke
 
 // LINT.IfChange(multiCounterIGMPStats)
 
+// +stateify savable
 type multiCounterIGMPStats struct {
 	packetsSent     multiCounterIGMPSentPacketStats
 	packetsReceived multiCounterIGMPReceivedPacketStats
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/flags.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/flags.go
index a8d7bff2..251b82e9 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/flags.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/flags.go
@@ -23,7 +23,7 @@ type Flags struct {
 
 	// LoadBalanced indicates SO_REUSEPORT.
 	//
-	// LoadBalanced takes precidence over MostRecent.
+	// LoadBalanced takes precedence over MostRecent.
 	LoadBalanced bool
 
 	// TupleOnly represents TCP SO_REUSEADDR.
@@ -91,6 +91,8 @@ func (f BitFlags) ToFlags() Flags {
 }
 
 // FlagCounter counts how many references each flag combination has.
+//
+// +stateify savable
 type FlagCounter struct {
 	// refs stores the count for each possible flag combination, (0 though
 	// FlagMask).
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go
index 11a9dc0b..b2feca12 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go
@@ -18,9 +18,8 @@ package ports
 
 import (
 	"math"
-	"math/rand"
 
-	"gvisor.dev/gvisor/pkg/atomicbitops"
+	"gvisor.dev/gvisor/pkg/rand"
 	"gvisor.dev/gvisor/pkg/sync"
 	"gvisor.dev/gvisor/pkg/tcpip"
 	"gvisor.dev/gvisor/pkg/tcpip/header"
@@ -66,12 +65,14 @@ func (rs Reservation) dst() destination {
 	}
 }
 
+// +stateify savable
 type portDescriptor struct {
 	network   tcpip.NetworkProtocolNumber
 	transport tcpip.TransportProtocolNumber
 	port      uint16
 }
 
+// +stateify savable
 type destination struct {
 	addr tcpip.Address
 	port uint16
@@ -215,26 +216,21 @@ func (ad addrToDevice) isAvailable(res Reservation, portSpecified bool) bool {
 }
 
 // PortManager manages allocating, reserving and releasing ports.
+//
+// +stateify savable
 type PortManager struct {
 	// mu protects allocatedPorts.
 	// LOCK ORDERING: mu > ephemeralMu.
-	mu sync.RWMutex
+	mu sync.RWMutex `state:"nosave"`
 	// allocatedPorts is a nesting of maps that ultimately map Reservations
 	// to FlagCounters describing whether the Reservation is valid and can
 	// be reused.
 	allocatedPorts map[portDescriptor]addrToDevice
 
 	// ephemeralMu protects firstEphemeral and numEphemeral.
-	ephemeralMu    sync.RWMutex
+	ephemeralMu    sync.RWMutex `state:"nosave"`
 	firstEphemeral uint16
 	numEphemeral   uint16
-
-	// hint is used to pick ports ephemeral ports in a stable order for
-	// a given port offset.
-	//
-	// hint must be accessed using the portHint/incPortHint helpers.
-	// TODO(gvisor.dev/issue/940): S/R this field.
-	hint atomicbitops.Uint32
 }
 
 // NewPortManager creates new PortManager.
@@ -255,41 +251,13 @@ type PortTester func(port uint16) (good bool, err tcpip.Error)
 // possible ephemeral ports, allowing the caller to decide whether a given port
 // is suitable for its needs, and stopping when a port is found or an error
 // occurs.
-func (pm *PortManager) PickEphemeralPort(rng *rand.Rand, testPort PortTester) (port uint16, err tcpip.Error) {
-	pm.ephemeralMu.RLock()
-	firstEphemeral := pm.firstEphemeral
-	numEphemeral := pm.numEphemeral
-	pm.ephemeralMu.RUnlock()
-
-	offset := uint32(rng.Int31n(int32(numEphemeral)))
-	return pickEphemeralPort(offset, firstEphemeral, numEphemeral, testPort)
-}
-
-// portHint atomically reads and returns the pm.hint value.
-func (pm *PortManager) portHint() uint32 {
-	return pm.hint.Load()
-}
-
-// incPortHint atomically increments pm.hint by 1.
-func (pm *PortManager) incPortHint() {
-	pm.hint.Add(1)
-}
-
-// PickEphemeralPortStable starts at the specified offset + pm.portHint and
-// iterates over all ephemeral ports, allowing the caller to decide whether a
-// given port is suitable for its needs and stopping when a port is found or an
-// error occurs.
-func (pm *PortManager) PickEphemeralPortStable(offset uint32, testPort PortTester) (port uint16, err tcpip.Error) {
+func (pm *PortManager) PickEphemeralPort(rng rand.RNG, testPort PortTester) (port uint16, err tcpip.Error) {
 	pm.ephemeralMu.RLock()
 	firstEphemeral := pm.firstEphemeral
 	numEphemeral := pm.numEphemeral
 	pm.ephemeralMu.RUnlock()
 
-	p, err := pickEphemeralPort(pm.portHint()+offset, firstEphemeral, numEphemeral, testPort)
-	if err == nil {
-		pm.incPortHint()
-	}
-	return p, err
+	return pickEphemeralPort(rng.Uint32(), firstEphemeral, numEphemeral, testPort)
 }
 
 // pickEphemeralPort starts at the offset specified from the FirstEphemeral port
@@ -297,6 +265,7 @@ func (pm *PortManager) PickEphemeralPortStable(offset uint32, testPort PortTeste
 // caller to decide whether a given port is suitable for its needs, and stopping
 // when a port is found or an error occurs.
 func pickEphemeralPort(offset uint32, first, count uint16, testPort PortTester) (port uint16, err tcpip.Error) {
+	// This implements Algorithm 1 as per RFC 6056 Section 3.3.1.
 	for i := uint32(0); i < uint32(count); i++ {
 		port := uint16(uint32(first) + (offset+i)%uint32(count))
 		ok, err := testPort(port)
@@ -320,7 +289,7 @@ func pickEphemeralPort(offset uint32, first, count uint16, testPort PortTester)
 // An optional PortTester can be passed in which if provided will be used to
 // test if the picked port can be used. The function should return true if the
 // port is safe to use, false otherwise.
-func (pm *PortManager) ReservePort(rng *rand.Rand, res Reservation, testPort PortTester) (reservedPort uint16, err tcpip.Error) {
+func (pm *PortManager) ReservePort(rng rand.RNG, res Reservation, testPort PortTester) (reservedPort uint16, err tcpip.Error) {
 	pm.mu.Lock()
 	defer pm.mu.Unlock()
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports_state_autogen.go
index 2719f6c4..1a3ae2ad 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports_state_autogen.go
@@ -3,6 +3,8 @@
 package ports
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -28,15 +30,134 @@ func (f *Flags) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &f.TupleOnly)
 }
 
-func (f *Flags) afterLoad() {}
+func (f *Flags) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (f *Flags) StateLoad(stateSourceObject state.Source) {
+func (f *Flags) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &f.MostRecent)
 	stateSourceObject.Load(1, &f.LoadBalanced)
 	stateSourceObject.Load(2, &f.TupleOnly)
 }
 
+func (c *FlagCounter) StateTypeName() string {
+	return "pkg/tcpip/ports.FlagCounter"
+}
+
+func (c *FlagCounter) StateFields() []string {
+	return []string{
+		"refs",
+	}
+}
+
+func (c *FlagCounter) beforeSave() {}
+
+// +checklocksignore
+func (c *FlagCounter) StateSave(stateSinkObject state.Sink) {
+	c.beforeSave()
+	stateSinkObject.Save(0, &c.refs)
+}
+
+func (c *FlagCounter) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (c *FlagCounter) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &c.refs)
+}
+
+func (p *portDescriptor) StateTypeName() string {
+	return "pkg/tcpip/ports.portDescriptor"
+}
+
+func (p *portDescriptor) StateFields() []string {
+	return []string{
+		"network",
+		"transport",
+		"port",
+	}
+}
+
+func (p *portDescriptor) beforeSave() {}
+
+// +checklocksignore
+func (p *portDescriptor) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.network)
+	stateSinkObject.Save(1, &p.transport)
+	stateSinkObject.Save(2, &p.port)
+}
+
+func (p *portDescriptor) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *portDescriptor) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.network)
+	stateSourceObject.Load(1, &p.transport)
+	stateSourceObject.Load(2, &p.port)
+}
+
+func (d *destination) StateTypeName() string {
+	return "pkg/tcpip/ports.destination"
+}
+
+func (d *destination) StateFields() []string {
+	return []string{
+		"addr",
+		"port",
+	}
+}
+
+func (d *destination) beforeSave() {}
+
+// +checklocksignore
+func (d *destination) StateSave(stateSinkObject state.Sink) {
+	d.beforeSave()
+	stateSinkObject.Save(0, &d.addr)
+	stateSinkObject.Save(1, &d.port)
+}
+
+func (d *destination) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (d *destination) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &d.addr)
+	stateSourceObject.Load(1, &d.port)
+}
+
+func (pm *PortManager) StateTypeName() string {
+	return "pkg/tcpip/ports.PortManager"
+}
+
+func (pm *PortManager) StateFields() []string {
+	return []string{
+		"allocatedPorts",
+		"firstEphemeral",
+		"numEphemeral",
+	}
+}
+
+func (pm *PortManager) beforeSave() {}
+
+// +checklocksignore
+func (pm *PortManager) StateSave(stateSinkObject state.Sink) {
+	pm.beforeSave()
+	stateSinkObject.Save(0, &pm.allocatedPorts)
+	stateSinkObject.Save(1, &pm.firstEphemeral)
+	stateSinkObject.Save(2, &pm.numEphemeral)
+}
+
+func (pm *PortManager) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (pm *PortManager) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &pm.allocatedPorts)
+	stateSourceObject.Load(1, &pm.firstEphemeral)
+	stateSourceObject.Load(2, &pm.numEphemeral)
+}
+
 func init() {
 	state.Register((*Flags)(nil))
+	state.Register((*FlagCounter)(nil))
+	state.Register((*portDescriptor)(nil))
+	state.Register((*destination)(nil))
+	state.Register((*PortManager)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro_packet_list.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/route_list.go
similarity index 61%
rename from vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro_packet_list.go
rename to vendor/gvisor.dev/gvisor/pkg/tcpip/route_list.go
index 86d2f49f..ddc7c23f 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro_packet_list.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/route_list.go
@@ -1,4 +1,4 @@
-package stack
+package tcpip
 
 // ElementMapper provides an identity mapping by default.
 //
@@ -6,14 +6,14 @@ package stack
 // objects, if they are not the same. An ElementMapper is not typically
 // required if: Linker is left as is, Element is left as is, or Linker and
 // Element are the same type.
-type groPacketElementMapper struct{}
+type RouteElementMapper struct{}
 
 // linkerFor maps an Element to a Linker.
 //
 // This default implementation should be inlined.
 //
 //go:nosplit
-func (groPacketElementMapper) linkerFor(elem *groPacket) *groPacket { return elem }
+func (RouteElementMapper) linkerFor(elem *Route) *Route { return elem }
 
 // List is an intrusive list. Entries can be added to or removed from the list
 // in O(1) time and with no additional memory allocations.
@@ -27,13 +27,13 @@ func (groPacketElementMapper) linkerFor(elem *groPacket) *groPacket { return ele
 //	}
 //
 // +stateify savable
-type groPacketList struct {
-	head *groPacket
-	tail *groPacket
+type RouteList struct {
+	head *Route
+	tail *Route
 }
 
 // Reset resets list l to the empty state.
-func (l *groPacketList) Reset() {
+func (l *RouteList) Reset() {
 	l.head = nil
 	l.tail = nil
 }
@@ -41,21 +41,21 @@ func (l *groPacketList) Reset() {
 // Empty returns true iff the list is empty.
 //
 //go:nosplit
-func (l *groPacketList) Empty() bool {
+func (l *RouteList) Empty() bool {
 	return l.head == nil
 }
 
 // Front returns the first element of list l or nil.
 //
 //go:nosplit
-func (l *groPacketList) Front() *groPacket {
+func (l *RouteList) Front() *Route {
 	return l.head
 }
 
 // Back returns the last element of list l or nil.
 //
 //go:nosplit
-func (l *groPacketList) Back() *groPacket {
+func (l *RouteList) Back() *Route {
 	return l.tail
 }
 
@@ -64,8 +64,8 @@ func (l *groPacketList) Back() *groPacket {
 // NOTE: This is an O(n) operation.
 //
 //go:nosplit
-func (l *groPacketList) Len() (count int) {
-	for e := l.Front(); e != nil; e = (groPacketElementMapper{}.linkerFor(e)).Next() {
+func (l *RouteList) Len() (count int) {
+	for e := l.Front(); e != nil; e = (RouteElementMapper{}.linkerFor(e)).Next() {
 		count++
 	}
 	return count
@@ -74,12 +74,12 @@ func (l *groPacketList) Len() (count int) {
 // PushFront inserts the element e at the front of list l.
 //
 //go:nosplit
-func (l *groPacketList) PushFront(e *groPacket) {
-	linker := groPacketElementMapper{}.linkerFor(e)
+func (l *RouteList) PushFront(e *Route) {
+	linker := RouteElementMapper{}.linkerFor(e)
 	linker.SetNext(l.head)
 	linker.SetPrev(nil)
 	if l.head != nil {
-		groPacketElementMapper{}.linkerFor(l.head).SetPrev(e)
+		RouteElementMapper{}.linkerFor(l.head).SetPrev(e)
 	} else {
 		l.tail = e
 	}
@@ -90,13 +90,13 @@ func (l *groPacketList) PushFront(e *groPacket) {
 // PushFrontList inserts list m at the start of list l, emptying m.
 //
 //go:nosplit
-func (l *groPacketList) PushFrontList(m *groPacketList) {
+func (l *RouteList) PushFrontList(m *RouteList) {
 	if l.head == nil {
 		l.head = m.head
 		l.tail = m.tail
 	} else if m.head != nil {
-		groPacketElementMapper{}.linkerFor(l.head).SetPrev(m.tail)
-		groPacketElementMapper{}.linkerFor(m.tail).SetNext(l.head)
+		RouteElementMapper{}.linkerFor(l.head).SetPrev(m.tail)
+		RouteElementMapper{}.linkerFor(m.tail).SetNext(l.head)
 
 		l.head = m.head
 	}
@@ -107,12 +107,12 @@ func (l *groPacketList) PushFrontList(m *groPacketList) {
 // PushBack inserts the element e at the back of list l.
 //
 //go:nosplit
-func (l *groPacketList) PushBack(e *groPacket) {
-	linker := groPacketElementMapper{}.linkerFor(e)
+func (l *RouteList) PushBack(e *Route) {
+	linker := RouteElementMapper{}.linkerFor(e)
 	linker.SetNext(nil)
 	linker.SetPrev(l.tail)
 	if l.tail != nil {
-		groPacketElementMapper{}.linkerFor(l.tail).SetNext(e)
+		RouteElementMapper{}.linkerFor(l.tail).SetNext(e)
 	} else {
 		l.head = e
 	}
@@ -123,13 +123,13 @@ func (l *groPacketList) PushBack(e *groPacket) {
 // PushBackList inserts list m at the end of list l, emptying m.
 //
 //go:nosplit
-func (l *groPacketList) PushBackList(m *groPacketList) {
+func (l *RouteList) PushBackList(m *RouteList) {
 	if l.head == nil {
 		l.head = m.head
 		l.tail = m.tail
 	} else if m.head != nil {
-		groPacketElementMapper{}.linkerFor(l.tail).SetNext(m.head)
-		groPacketElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+		RouteElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+		RouteElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
 
 		l.tail = m.tail
 	}
@@ -140,9 +140,9 @@ func (l *groPacketList) PushBackList(m *groPacketList) {
 // InsertAfter inserts e after b.
 //
 //go:nosplit
-func (l *groPacketList) InsertAfter(b, e *groPacket) {
-	bLinker := groPacketElementMapper{}.linkerFor(b)
-	eLinker := groPacketElementMapper{}.linkerFor(e)
+func (l *RouteList) InsertAfter(b, e *Route) {
+	bLinker := RouteElementMapper{}.linkerFor(b)
+	eLinker := RouteElementMapper{}.linkerFor(e)
 
 	a := bLinker.Next()
 
@@ -151,7 +151,7 @@ func (l *groPacketList) InsertAfter(b, e *groPacket) {
 	bLinker.SetNext(e)
 
 	if a != nil {
-		groPacketElementMapper{}.linkerFor(a).SetPrev(e)
+		RouteElementMapper{}.linkerFor(a).SetPrev(e)
 	} else {
 		l.tail = e
 	}
@@ -160,9 +160,9 @@ func (l *groPacketList) InsertAfter(b, e *groPacket) {
 // InsertBefore inserts e before a.
 //
 //go:nosplit
-func (l *groPacketList) InsertBefore(a, e *groPacket) {
-	aLinker := groPacketElementMapper{}.linkerFor(a)
-	eLinker := groPacketElementMapper{}.linkerFor(e)
+func (l *RouteList) InsertBefore(a, e *Route) {
+	aLinker := RouteElementMapper{}.linkerFor(a)
+	eLinker := RouteElementMapper{}.linkerFor(e)
 
 	b := aLinker.Prev()
 	eLinker.SetNext(a)
@@ -170,7 +170,7 @@ func (l *groPacketList) InsertBefore(a, e *groPacket) {
 	aLinker.SetPrev(e)
 
 	if b != nil {
-		groPacketElementMapper{}.linkerFor(b).SetNext(e)
+		RouteElementMapper{}.linkerFor(b).SetNext(e)
 	} else {
 		l.head = e
 	}
@@ -179,19 +179,19 @@ func (l *groPacketList) InsertBefore(a, e *groPacket) {
 // Remove removes e from l.
 //
 //go:nosplit
-func (l *groPacketList) Remove(e *groPacket) {
-	linker := groPacketElementMapper{}.linkerFor(e)
+func (l *RouteList) Remove(e *Route) {
+	linker := RouteElementMapper{}.linkerFor(e)
 	prev := linker.Prev()
 	next := linker.Next()
 
 	if prev != nil {
-		groPacketElementMapper{}.linkerFor(prev).SetNext(next)
+		RouteElementMapper{}.linkerFor(prev).SetNext(next)
 	} else if l.head == e {
 		l.head = next
 	}
 
 	if next != nil {
-		groPacketElementMapper{}.linkerFor(next).SetPrev(prev)
+		RouteElementMapper{}.linkerFor(next).SetPrev(prev)
 	} else if l.tail == e {
 		l.tail = prev
 	}
@@ -205,35 +205,35 @@ func (l *groPacketList) Remove(e *groPacket) {
 // methods needed by List.
 //
 // +stateify savable
-type groPacketEntry struct {
-	next *groPacket
-	prev *groPacket
+type RouteEntry struct {
+	next *Route
+	prev *Route
 }
 
 // Next returns the entry that follows e in the list.
 //
 //go:nosplit
-func (e *groPacketEntry) Next() *groPacket {
+func (e *RouteEntry) Next() *Route {
 	return e.next
 }
 
 // Prev returns the entry that precedes e in the list.
 //
 //go:nosplit
-func (e *groPacketEntry) Prev() *groPacket {
+func (e *RouteEntry) Prev() *Route {
 	return e.prev
 }
 
 // SetNext assigns 'entry' as the entry that follows e in the list.
 //
 //go:nosplit
-func (e *groPacketEntry) SetNext(elem *groPacket) {
+func (e *RouteEntry) SetNext(elem *Route) {
 	e.next = elem
 }
 
 // SetPrev assigns 'entry' as the entry that precedes e in the list.
 //
 //go:nosplit
-func (e *groPacketEntry) SetPrev(elem *groPacket) {
+func (e *RouteEntry) SetPrev(elem *Route) {
 	e.prev = elem
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go
index a3aadb22..b8196912 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go
@@ -63,6 +63,10 @@ type SocketOptionsHandler interface {
 	// changed. The handler notifies the writers if the send buffer size is
 	// increased with setsockopt(2) for TCP endpoints.
 	WakeupWriters()
+
+	// GetAcceptConn returns true if the socket is a TCP socket and is in
+	// listening state.
+	GetAcceptConn() bool
 }
 
 // DefaultSocketOptionsHandler is an embeddable type that implements no-op
@@ -112,6 +116,11 @@ func (*DefaultSocketOptionsHandler) OnSetReceiveBufferSize(v, oldSz int64) (newS
 	return v, nil
 }
 
+// GetAcceptConn implements SocketOptionsHandler.GetAcceptConn.
+func (*DefaultSocketOptionsHandler) GetAcceptConn() bool {
+	return false
+}
+
 // StackHandler holds methods to access the stack options. These must be
 // implemented by the stack.
 type StackHandler interface {
@@ -742,3 +751,8 @@ func (so *SocketOptions) SetRcvlowat(rcvlowat int32) Error {
 	so.rcvlowat.Store(rcvlowat)
 	return nil
 }
+
+// GetAcceptConn gets the value of the SO_ACCEPTCONN option.
+func (so *SocketOptions) GetAcceptConn() bool {
+	return so.handler.GetAcceptConn()
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_mutex.go
index a0177a58..8373da7e 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_mutex.go
@@ -17,7 +17,7 @@ type addressStateRWMutex struct {
 var addressStatelockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type addressStatelockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go
index 866a2c36..3be2d55b 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go
@@ -1,6 +1,7 @@
 package stack
 
 import (
+	"context"
 	"fmt"
 
 	"gvisor.dev/gvisor/pkg/atomicbitops"
@@ -134,7 +135,7 @@ func (r *addressStateRefs) DecRef(destroy func()) {
 	}
 }
 
-func (r *addressStateRefs) afterLoad() {
+func (r *addressStateRefs) afterLoad(context.Context) {
 	if r.ReadRefs() > 0 {
 		refs.Register(r)
 	}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go
index f472bbf0..c0290ca6 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go
@@ -30,6 +30,8 @@ func (lifetimes *AddressLifetimes) sanitize() {
 var _ AddressableEndpoint = (*AddressableEndpointState)(nil)
 
 // AddressableEndpointState is an implementation of an AddressableEndpoint.
+//
+// +stateify savable
 type AddressableEndpointState struct {
 	networkEndpoint NetworkEndpoint
 	options         AddressableEndpointStateOptions
@@ -38,7 +40,7 @@ type AddressableEndpointState struct {
 	//
 	// AddressableEndpointState.mu
 	//   addressState.mu
-	mu addressableEndpointStateRWMutex
+	mu addressableEndpointStateRWMutex `state:"nosave"`
 	// +checklocks:mu
 	endpoints map[tcpip.Address]*addressState
 	// +checklocks:mu
@@ -47,6 +49,8 @@ type AddressableEndpointState struct {
 
 // AddressableEndpointStateOptions contains options used to configure an
 // AddressableEndpointState.
+//
+// +stateify savable
 type AddressableEndpointStateOptions struct {
 	// HiddenWhileDisabled determines whether addresses should be returned to
 	// callers while the NetworkEndpoint this AddressableEndpointState belongs
@@ -434,7 +438,7 @@ func (a *AddressableEndpointState) MainAddress() tcpip.AddressWithPrefix {
 	a.mu.RLock()
 	defer a.mu.RUnlock()
 
-	ep := a.acquirePrimaryAddressRLocked(tcpip.Address{}, func(ep *addressState) bool {
+	ep := a.acquirePrimaryAddressRLocked(tcpip.Address{}, tcpip.Address{} /* srcHint */, func(ep *addressState) bool {
 		switch kind := ep.GetKind(); kind {
 		case Permanent:
 			return a.networkEndpoint.Enabled() || !a.options.HiddenWhileDisabled
@@ -462,7 +466,7 @@ func (a *AddressableEndpointState) MainAddress() tcpip.AddressWithPrefix {
 // valid according to isValid.
 //
 // +checklocksread:a.mu
-func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(remoteAddr tcpip.Address, isValid func(*addressState) bool) *addressState {
+func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(remoteAddr, srcHint tcpip.Address, isValid func(*addressState) bool) *addressState {
 	// TODO: Move this out into IPv4-specific code.
 	// IPv6 handles source IP selection elsewhere. We have to do source
 	// selection only for IPv4, in which case ep is never deprecated. Thus
@@ -474,6 +478,11 @@ func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(remoteAddr tcpip
 			if !isValid(state) {
 				continue
 			}
+			// Source hint takes precedence over prefix matching.
+			if state.addr.Address == srcHint && srcHint != (tcpip.Address{}) {
+				best = state
+				break
+			}
 			stateLen := state.addr.Address.MatchingPrefix(remoteAddr)
 			if best == nil || bestLen < stateLen {
 				best = state
@@ -532,16 +541,20 @@ func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(remoteAddr tcpip
 // If there is no matching address, a temporary address will be returned if
 // allowTemp is true.
 //
+// If readOnly is true, the address will be returned without an extra reference.
+// In this case it is not safe to modify the endpoint, only read attributes like
+// subnet.
+//
 // Regardless how the address was obtained, it will be acquired before it is
 // returned.
-func (a *AddressableEndpointState) AcquireAssignedAddressOrMatching(localAddr tcpip.Address, f func(AddressEndpoint) bool, allowTemp bool, tempPEB PrimaryEndpointBehavior) AddressEndpoint {
+func (a *AddressableEndpointState) AcquireAssignedAddressOrMatching(localAddr tcpip.Address, f func(AddressEndpoint) bool, allowTemp bool, tempPEB PrimaryEndpointBehavior, readOnly bool) AddressEndpoint {
 	lookup := func() *addressState {
 		if addrState, ok := a.endpoints[localAddr]; ok {
 			if !addrState.IsAssigned(allowTemp) {
 				return nil
 			}
 
-			if !addrState.TryIncRef() {
+			if !readOnly && !addrState.TryIncRef() {
 				panic(fmt.Sprintf("failed to increase the reference count for address = %s", addrState.addr))
 			}
 
@@ -550,7 +563,10 @@ func (a *AddressableEndpointState) AcquireAssignedAddressOrMatching(localAddr tc
 
 		if f != nil {
 			for _, addrState := range a.endpoints {
-				if addrState.IsAssigned(allowTemp) && f(addrState) && addrState.TryIncRef() {
+				if addrState.IsAssigned(allowTemp) && f(addrState) {
+					if !readOnly && !addrState.TryIncRef() {
+						continue
+					}
 					return addrState
 				}
 			}
@@ -609,20 +625,30 @@ func (a *AddressableEndpointState) AcquireAssignedAddressOrMatching(localAddr tc
 	if ep == nil {
 		return nil
 	}
+	if readOnly {
+		if ep.addressableEndpointState == a {
+			// Checklocks doesn't understand that we are logically guaranteed to have
+			// ep.mu locked already. We need to use checklocksignore to appease the
+			// analyzer.
+			ep.addressableEndpointState.decAddressRefLocked(ep) // +checklocksignore
+		} else {
+			ep.DecRef()
+		}
+	}
 	return ep
 }
 
 // AcquireAssignedAddress implements AddressableEndpoint.
-func (a *AddressableEndpointState) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB PrimaryEndpointBehavior) AddressEndpoint {
-	return a.AcquireAssignedAddressOrMatching(localAddr, nil, allowTemp, tempPEB)
+func (a *AddressableEndpointState) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB PrimaryEndpointBehavior, readOnly bool) AddressEndpoint {
+	return a.AcquireAssignedAddressOrMatching(localAddr, nil, allowTemp, tempPEB, readOnly)
 }
 
 // AcquireOutgoingPrimaryAddress implements AddressableEndpoint.
-func (a *AddressableEndpointState) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) AddressEndpoint {
+func (a *AddressableEndpointState) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, srcHint tcpip.Address, allowExpired bool) AddressEndpoint {
 	a.mu.Lock()
 	defer a.mu.Unlock()
 
-	ep := a.acquirePrimaryAddressRLocked(remoteAddr, func(ep *addressState) bool {
+	ep := a.acquirePrimaryAddressRLocked(remoteAddr, srcHint, func(ep *addressState) bool {
 		return ep.IsAssigned(allowExpired)
 	})
 
@@ -710,6 +736,8 @@ func (a *AddressableEndpointState) Cleanup() {
 var _ AddressEndpoint = (*addressState)(nil)
 
 // addressState holds state for an address.
+//
+// +stateify savable
 type addressState struct {
 	addressableEndpointState *AddressableEndpointState
 	addr                     tcpip.AddressWithPrefix
@@ -720,7 +748,7 @@ type addressState struct {
 	//
 	// AddressableEndpointState.mu
 	//   addressState.mu
-	mu   addressStateRWMutex
+	mu   addressStateRWMutex `state:"nosave"`
 	refs addressStateRefs
 	// checklocks:mu
 	kind AddressKind
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state_mutex.go
index f78028d6..56ea53e3 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state_mutex.go
@@ -17,7 +17,7 @@ type addressableEndpointStateRWMutex struct {
 var addressableEndpointStatelockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type addressableEndpointStatelockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bridge.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bridge.go
new file mode 100644
index 00000000..72cd5913
--- /dev/null
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bridge.go
@@ -0,0 +1,229 @@
+// Copyright 2024 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stack
+
+import (
+	"gvisor.dev/gvisor/pkg/atomicbitops"
+	"gvisor.dev/gvisor/pkg/tcpip"
+	"gvisor.dev/gvisor/pkg/tcpip/header"
+)
+
+var _ NetworkLinkEndpoint = (*BridgeEndpoint)(nil)
+
+type bridgePort struct {
+	bridge *BridgeEndpoint
+	nic    *nic
+}
+
+// ParseHeader implements stack.LinkEndpoint.
+func (p *bridgePort) ParseHeader(pkt *PacketBuffer) bool {
+	_, ok := pkt.LinkHeader().Consume(header.EthernetMinimumSize)
+	return ok
+}
+
+// DeliverNetworkPacket implements stack.NetworkDispatcher.
+func (p *bridgePort) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) {
+	bridge := p.bridge
+	bridge.mu.RLock()
+
+	// Send the packet to all other ports.
+	for _, port := range bridge.ports {
+		if p == port {
+			continue
+		}
+		newPkt := NewPacketBuffer(PacketBufferOptions{
+			ReserveHeaderBytes: int(port.nic.MaxHeaderLength()),
+			Payload:            pkt.ToBuffer(),
+		})
+		port.nic.writeRawPacket(newPkt)
+		newPkt.DecRef()
+	}
+
+	d := bridge.dispatcher
+	bridge.mu.RUnlock()
+	if d != nil {
+		// The dispatcher may acquire Stack.mu in DeliverNetworkPacket(), which is
+		// ordered above bridge.mu. So call DeliverNetworkPacket() without holding
+		// bridge.mu to avoid circular locking.
+		d.DeliverNetworkPacket(protocol, pkt)
+	}
+}
+
+func (p *bridgePort) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) {
+}
+
+// NewBridgeEndpoint creates a new bridge endpoint.
+func NewBridgeEndpoint(mtu uint32) *BridgeEndpoint {
+	b := &BridgeEndpoint{
+		mtu:  mtu,
+		addr: tcpip.GetRandMacAddr(),
+	}
+	b.ports = make(map[tcpip.NICID]*bridgePort)
+	return b
+}
+
+// BridgeEndpoint is a bridge endpoint.
+type BridgeEndpoint struct {
+	mu bridgeRWMutex
+	// +checklocks:mu
+	ports map[tcpip.NICID]*bridgePort
+	// +checklocks:mu
+	dispatcher NetworkDispatcher
+	// +checklocks:mu
+	addr tcpip.LinkAddress
+	// +checklocks:mu
+	attached bool
+	// +checklocks:mu
+	mtu             uint32
+	maxHeaderLength atomicbitops.Uint32
+}
+
+// WritePackets implements stack.LinkEndpoint.WritePackets.
+func (b *BridgeEndpoint) WritePackets(pkts PacketBufferList) (int, tcpip.Error) {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	pktsSlice := pkts.AsSlice()
+	n := len(pktsSlice)
+	for _, p := range b.ports {
+		for _, pkt := range pktsSlice {
+			// In order to properly loop back to the inbound side we must create a
+			// fresh packet that only contains the underlying payload with no headers
+			// or struct fields set.
+			newPkt := NewPacketBuffer(PacketBufferOptions{
+				Payload:            pkt.ToBuffer(),
+				ReserveHeaderBytes: int(p.nic.MaxHeaderLength()),
+			})
+			newPkt.EgressRoute = pkt.EgressRoute
+			newPkt.NetworkProtocolNumber = pkt.NetworkProtocolNumber
+			p.nic.writePacket(newPkt)
+			newPkt.DecRef()
+		}
+	}
+
+	return n, nil
+}
+
+// AddNIC adds the specified NIC to the bridge.
+func (b *BridgeEndpoint) AddNIC(n *nic) tcpip.Error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	port := &bridgePort{
+		nic:    n,
+		bridge: b,
+	}
+	n.NetworkLinkEndpoint.Attach(port)
+	b.ports[n.id] = port
+
+	if b.maxHeaderLength.Load() < uint32(n.MaxHeaderLength()) {
+		b.maxHeaderLength.Store(uint32(n.MaxHeaderLength()))
+	}
+
+	return nil
+}
+
+// DelNIC removes the specified NIC from the bridge.
+func (b *BridgeEndpoint) DelNIC(nic *nic) tcpip.Error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	delete(b.ports, nic.id)
+	nic.NetworkLinkEndpoint.Attach(nic)
+	return nil
+}
+
+// MTU implements stack.LinkEndpoint.MTU.
+func (b *BridgeEndpoint) MTU() uint32 {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	if b.mtu > header.EthernetMinimumSize {
+		return b.mtu - header.EthernetMinimumSize
+	}
+	return 0
+}
+
+// SetMTU implements stack.LinkEndpoint.SetMTU.
+func (b *BridgeEndpoint) SetMTU(mtu uint32) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.mtu = mtu
+}
+
+// MaxHeaderLength implements stack.LinkEndpoint.
+func (b *BridgeEndpoint) MaxHeaderLength() uint16 {
+	return uint16(b.maxHeaderLength.Load())
+}
+
+// LinkAddress implements stack.LinkEndpoint.LinkAddress.
+func (b *BridgeEndpoint) LinkAddress() tcpip.LinkAddress {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return b.addr
+}
+
+// SetLinkAddress implements stack.LinkEndpoint.SetLinkAddress.
+func (b *BridgeEndpoint) SetLinkAddress(addr tcpip.LinkAddress) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.addr = addr
+}
+
+// Capabilities implements stack.LinkEndpoint.Capabilities.
+func (b *BridgeEndpoint) Capabilities() LinkEndpointCapabilities {
+	return CapabilityRXChecksumOffload | CapabilitySaveRestore | CapabilityResolutionRequired
+}
+
+// Attach implements stack.LinkEndpoint.Attach.
+func (b *BridgeEndpoint) Attach(dispatcher NetworkDispatcher) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	for _, p := range b.ports {
+		p.nic.Primary = nil
+	}
+	b.dispatcher = dispatcher
+	b.ports = make(map[tcpip.NICID]*bridgePort)
+}
+
+// IsAttached implements stack.LinkEndpoint.IsAttached.
+func (b *BridgeEndpoint) IsAttached() bool {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.dispatcher != nil
+}
+
+// Wait implements stack.LinkEndpoint.Wait.
+func (b *BridgeEndpoint) Wait() {
+}
+
+// ARPHardwareType implements stack.LinkEndpoint.ARPHardwareType.
+func (b *BridgeEndpoint) ARPHardwareType() header.ARPHardwareType {
+	return header.ARPHardwareEther
+}
+
+// AddHeader implements stack.LinkEndpoint.AddHeader.
+func (b *BridgeEndpoint) AddHeader(pkt *PacketBuffer) {
+}
+
+// ParseHeader implements stack.LinkEndpoint.ParseHeader.
+func (b *BridgeEndpoint) ParseHeader(*PacketBuffer) bool {
+	return true
+}
+
+// Close implements stack.LinkEndpoint.Close.
+func (b *BridgeEndpoint) Close() {}
+
+// SetOnCloseAction implements stack.LinkEndpoint.SetOnCloseAction.
+func (b *BridgeEndpoint) SetOnCloseAction(func()) {}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bridge_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bridge_mutex.go
new file mode 100644
index 00000000..33d66936
--- /dev/null
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bridge_mutex.go
@@ -0,0 +1,96 @@
+package stack
+
+import (
+	"reflect"
+
+	"gvisor.dev/gvisor/pkg/sync"
+	"gvisor.dev/gvisor/pkg/sync/locking"
+)
+
+// RWMutex is sync.RWMutex with the correctness validator.
+type bridgeRWMutex struct {
+	mu sync.RWMutex
+}
+
+// lockNames is a list of user-friendly lock names.
+// Populated in init.
+var bridgelockNames []string
+
+// lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
+// referring to an index within lockNames.
+// Values are specified using the "consts" field of go_template_instance.
+type bridgelockNameIndex int
+
+// DO NOT REMOVE: The following function is automatically replaced with lock index constants.
+// LOCK_NAME_INDEX_CONSTANTS
+const ()
+
+// Lock locks m.
+// +checklocksignore
+func (m *bridgeRWMutex) Lock() {
+	locking.AddGLock(bridgeprefixIndex, -1)
+	m.mu.Lock()
+}
+
+// NestedLock locks m knowing that another lock of the same type is held.
+// +checklocksignore
+func (m *bridgeRWMutex) NestedLock(i bridgelockNameIndex) {
+	locking.AddGLock(bridgeprefixIndex, int(i))
+	m.mu.Lock()
+}
+
+// Unlock unlocks m.
+// +checklocksignore
+func (m *bridgeRWMutex) Unlock() {
+	m.mu.Unlock()
+	locking.DelGLock(bridgeprefixIndex, -1)
+}
+
+// NestedUnlock unlocks m knowing that another lock of the same type is held.
+// +checklocksignore
+func (m *bridgeRWMutex) NestedUnlock(i bridgelockNameIndex) {
+	m.mu.Unlock()
+	locking.DelGLock(bridgeprefixIndex, int(i))
+}
+
+// RLock locks m for reading.
+// +checklocksignore
+func (m *bridgeRWMutex) RLock() {
+	locking.AddGLock(bridgeprefixIndex, -1)
+	m.mu.RLock()
+}
+
+// RUnlock undoes a single RLock call.
+// +checklocksignore
+func (m *bridgeRWMutex) RUnlock() {
+	m.mu.RUnlock()
+	locking.DelGLock(bridgeprefixIndex, -1)
+}
+
+// RLockBypass locks m for reading without executing the validator.
+// +checklocksignore
+func (m *bridgeRWMutex) RLockBypass() {
+	m.mu.RLock()
+}
+
+// RUnlockBypass undoes a single RLockBypass call.
+// +checklocksignore
+func (m *bridgeRWMutex) RUnlockBypass() {
+	m.mu.RUnlock()
+}
+
+// DowngradeLock atomically unlocks rw for writing and locks it for reading.
+// +checklocksignore
+func (m *bridgeRWMutex) DowngradeLock() {
+	m.mu.DowngradeLock()
+}
+
+var bridgeprefixIndex *locking.MutexClass
+
+// DO NOT REMOVE: The following function is automatically replaced.
+func bridgeinitLockNames() {}
+
+func init() {
+	bridgeinitLockNames()
+	bridgeprefixIndex = locking.NewMutexClass(reflect.TypeOf(bridgeRWMutex{}), bridgelockNames)
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bucket_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bucket_mutex.go
index e4100b1e..3cee9c82 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bucket_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bucket_mutex.go
@@ -17,7 +17,7 @@ type bucketRWMutex struct {
 var bucketlockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type bucketlockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/cleanup_endpoints_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/cleanup_endpoints_mutex.go
index 0270b25d..0516e7b0 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/cleanup_endpoints_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/cleanup_endpoints_mutex.go
@@ -19,7 +19,7 @@ var cleanupEndpointsprefixIndex *locking.MutexClass
 var cleanupEndpointslockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type cleanupEndpointslockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_mutex.go
index 6af809e7..6a9905ed 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_mutex.go
@@ -17,7 +17,7 @@ type connRWMutex struct {
 var connlockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type connlockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_track_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_track_mutex.go
index ad020f1e..b416fda7 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_track_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_track_mutex.go
@@ -17,7 +17,7 @@ type connTrackRWMutex struct {
 var connTracklockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type connTracklockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go
index 02bce870..ba11e381 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go
@@ -135,7 +135,8 @@ type conn struct {
 	// reply is the tuple in reply direction.
 	reply tuple
 
-	finalizeOnce sync.Once
+	// TODO(b/341946753): Restore when netstack is savable.
+	finalizeOnce sync.Once `state:"nosave"`
 	// Holds a finalizeResult.
 	finalizeResult atomicbitops.Uint32
 
@@ -177,7 +178,7 @@ func (cn *conn) timedOut(now tcpip.MonotonicTime) bool {
 }
 
 // update the connection tracking state.
-func (cn *conn) update(pkt PacketBufferPtr, reply bool) {
+func (cn *conn) update(pkt *PacketBuffer, reply bool) {
 	cn.stateMu.Lock()
 	defer cn.stateMu.Unlock()
 
@@ -228,7 +229,8 @@ type ConnTrack struct {
 
 	// clock provides timing used to determine conntrack reapings.
 	clock tcpip.Clock
-	rand  *rand.Rand
+	// TODO(b/341946753): Restore when netstack is savable.
+	rand *rand.Rand `state:"nosave"`
 
 	mu connTrackRWMutex `state:"nosave"`
 	// mu protects the buckets slice, but not buckets' contents. Only take
@@ -269,7 +271,7 @@ func v6NetAndTransHdr(icmpPayload []byte, minTransHdrLen int) (header.Network, [
 	return netHdr, transHdr[:minTransHdrLen]
 }
 
-func getEmbeddedNetAndTransHeaders(pkt PacketBufferPtr, netHdrLength int, getNetAndTransHdr netAndTransHeadersFunc, transProto tcpip.TransportProtocolNumber) (header.Network, header.ChecksummableTransport, bool) {
+func getEmbeddedNetAndTransHeaders(pkt *PacketBuffer, netHdrLength int, getNetAndTransHdr netAndTransHeadersFunc, transProto tcpip.TransportProtocolNumber) (header.Network, header.ChecksummableTransport, bool) {
 	switch transProto {
 	case header.TCPProtocolNumber:
 		if netAndTransHeader, ok := pkt.Data().PullUp(netHdrLength + header.TCPMinimumSize); ok {
@@ -285,7 +287,7 @@ func getEmbeddedNetAndTransHeaders(pkt PacketBufferPtr, netHdrLength int, getNet
 	return nil, nil, false
 }
 
-func getHeaders(pkt PacketBufferPtr) (netHdr header.Network, transHdr header.Transport, isICMPError bool, ok bool) {
+func getHeaders(pkt *PacketBuffer) (netHdr header.Network, transHdr header.Transport, isICMPError bool, ok bool) {
 	switch pkt.TransportProtocolNumber {
 	case header.TCPProtocolNumber:
 		if tcpHeader := header.TCP(pkt.TransportHeader().Slice()); len(tcpHeader) >= header.TCPMinimumSize {
@@ -373,7 +375,7 @@ func getTupleIDForRegularPacket(netHdr header.Network, netProto tcpip.NetworkPro
 	}
 }
 
-func getTupleIDForPacketInICMPError(pkt PacketBufferPtr, getNetAndTransHdr netAndTransHeadersFunc, netProto tcpip.NetworkProtocolNumber, netLen int, transProto tcpip.TransportProtocolNumber) (tupleID, bool) {
+func getTupleIDForPacketInICMPError(pkt *PacketBuffer, getNetAndTransHdr netAndTransHeadersFunc, netProto tcpip.NetworkProtocolNumber, netLen int, transProto tcpip.TransportProtocolNumber) (tupleID, bool) {
 	if netHdr, transHdr, ok := getEmbeddedNetAndTransHeaders(pkt, netLen, getNetAndTransHdr, transProto); ok {
 		return tupleID{
 			srcAddr:                   netHdr.DestinationAddress(),
@@ -396,7 +398,7 @@ const (
 	getTupleIDOKAndDontAllowNewConn
 )
 
-func getTupleIDForEchoPacket(pkt PacketBufferPtr, ident uint16, request bool) tupleID {
+func getTupleIDForEchoPacket(pkt *PacketBuffer, ident uint16, request bool) tupleID {
 	netHdr := pkt.Network()
 	tid := tupleID{
 		srcAddr:    netHdr.SourceAddress(),
@@ -414,7 +416,7 @@ func getTupleIDForEchoPacket(pkt PacketBufferPtr, ident uint16, request bool) tu
 	return tid
 }
 
-func getTupleID(pkt PacketBufferPtr) (tupleID, getTupleIDDisposition) {
+func getTupleID(pkt *PacketBuffer) (tupleID, getTupleIDDisposition) {
 	switch pkt.TransportProtocolNumber {
 	case header.TCPProtocolNumber:
 		if transHeader := header.TCP(pkt.TransportHeader().Slice()); len(transHeader) >= header.TCPMinimumSize {
@@ -504,7 +506,7 @@ func (ct *ConnTrack) init() {
 //
 // If the packet's protocol is trackable, the connection's state is updated to
 // match the contents of the packet.
-func (ct *ConnTrack) getConnAndUpdate(pkt PacketBufferPtr, skipChecksumValidation bool) *tuple {
+func (ct *ConnTrack) getConnAndUpdate(pkt *PacketBuffer, skipChecksumValidation bool) *tuple {
 	// Get or (maybe) create a connection.
 	t := func() *tuple {
 		var allowNewConn bool
@@ -695,20 +697,41 @@ func (cn *conn) finalize() bool {
 	}
 }
 
-func (cn *conn) maybePerformNoopNAT(dnat bool) {
+// If NAT has not been configured for this connection, either mark the
+// connection as configured for "no-op NAT", in the case of DNAT, or, in the
+// case of SNAT, perform source port remapping so that source ports used by
+// locally-generated traffic do not conflict with ports occupied by existing NAT
+// bindings.
+//
+// Note that in the typical case this is also a no-op, because `snatAction`
+// will do nothing if the original tuple is already unique.
+func (cn *conn) maybePerformNoopNAT(pkt *PacketBuffer, hook Hook, r *Route, dnat bool) {
 	cn.mu.Lock()
-	defer cn.mu.Unlock()
-
 	var manip *manipType
 	if dnat {
 		manip = &cn.destinationManip
 	} else {
 		manip = &cn.sourceManip
 	}
-
-	if *manip == manipNotPerformed {
+	if *manip != manipNotPerformed {
+		cn.mu.Unlock()
+		_ = cn.handlePacket(pkt, hook, r)
+		return
+	}
+	if dnat {
 		*manip = manipPerformedNoop
+		cn.mu.Unlock()
+		_ = cn.handlePacket(pkt, hook, r)
+		return
 	}
+	cn.mu.Unlock()
+
+	// At this point, we know that NAT has not yet been performed on this
+	// connection, and the DNAT case has been handled with a no-op. For SNAT, we
+	// simply perform source port remapping to ensure that source ports for
+	// locally generated traffic do not clash with ports used by existing NAT
+	// bindings.
+	_, _ = snatAction(pkt, hook, r, 0, tcpip.Address{}, true /* changePort */, false /* changeAddress */)
 }
 
 type portOrIdentRange struct {
@@ -725,7 +748,7 @@ type portOrIdentRange struct {
 //
 // Generally, only the first packet of a connection reaches this method; other
 // packets will be manipulated without needing to modify the connection.
-func (cn *conn) performNAT(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIdents portOrIdentRange, natAddress tcpip.Address, dnat bool) {
+func (cn *conn) performNAT(pkt *PacketBuffer, hook Hook, r *Route, portsOrIdents portOrIdentRange, natAddress tcpip.Address, dnat, changePort, changeAddress bool) {
 	lastPortOrIdent := func() uint16 {
 		lastPortOrIdent := uint32(portsOrIdents.start) + portsOrIdents.size - 1
 		if lastPortOrIdent > math.MaxUint16 {
@@ -762,12 +785,24 @@ func (cn *conn) performNAT(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIden
 		return
 	}
 	*manip = manipPerformed
-	*address = natAddress
+	if changeAddress {
+		*address = natAddress
+	}
+
+	// Everything below here is port-fiddling.
+	if !changePort {
+		return
+	}
 
 	// Does the current port/ident fit in the range?
 	if portsOrIdents.start <= *portOrIdent && *portOrIdent <= lastPortOrIdent {
 		// Yes, is the current reply tuple unique?
-		if other := cn.ct.connForTID(cn.reply.tupleID); other == nil {
+		//
+		// Or, does the reply tuple refer to the same connection as the current one that
+		// we are NATing? This would apply, for example, to a self-connected socket,
+		// where the original and reply tuples are identical.
+		other := cn.ct.connForTID(cn.reply.tupleID)
+		if other == nil || other.conn == cn {
 			// Yes! No need to change the port.
 			return
 		}
@@ -826,7 +861,7 @@ func (cn *conn) performNAT(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIden
 // has had NAT performed on it.
 //
 // Returns true if the packet can skip the NAT table.
-func (cn *conn) handlePacket(pkt PacketBufferPtr, hook Hook, rt *Route) bool {
+func (cn *conn) handlePacket(pkt *PacketBuffer, hook Hook, rt *Route) bool {
 	netHdr, transHdr, isICMPError, ok := getHeaders(pkt)
 	if !ok {
 		return false
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/endpoints_by_nic_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/endpoints_by_nic_mutex.go
index ba1cd360..60642030 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/endpoints_by_nic_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/endpoints_by_nic_mutex.go
@@ -17,7 +17,7 @@ type endpointsByNICRWMutex struct {
 var endpointsByNIClockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type endpointsByNIClockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go
deleted file mode 100644
index 2a2a3013..00000000
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go
+++ /dev/null
@@ -1,730 +0,0 @@
-// Copyright 2022 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stack
-
-import (
-	"bytes"
-	"fmt"
-	"time"
-
-	"gvisor.dev/gvisor/pkg/atomicbitops"
-	"gvisor.dev/gvisor/pkg/sync"
-	"gvisor.dev/gvisor/pkg/tcpip"
-	"gvisor.dev/gvisor/pkg/tcpip/header"
-)
-
-// TODO(b/256037250): Enable by default.
-// TODO(b/256037250): We parse headers here. We should save those headers in
-// PacketBuffers so they don't have to be re-parsed later.
-// TODO(b/256037250): I still see the occasional SACK block in the zero-loss
-// benchmark, which should not happen.
-// TODO(b/256037250): Some dispatchers, e.g. XDP and RecvMmsg, can receive
-// multiple packets at a time. Even if the GRO interval is 0, there is an
-// opportunity for coalescing.
-// TODO(b/256037250): We're doing some header parsing here, which presents the
-// opportunity to skip it later.
-// TODO(b/256037250): We may be able to remove locking by pairing
-// groDispatchers with link endpoint dispatchers.
-
-const (
-	// groNBuckets is the number of GRO buckets.
-	groNBuckets = 8
-
-	groNBucketsMask = groNBuckets - 1
-
-	// groBucketSize is the size of each GRO bucket.
-	groBucketSize = 8
-
-	// groMaxPacketSize is the maximum size of a GRO'd packet.
-	groMaxPacketSize = 1 << 16 // 65KB.
-)
-
-// A groBucket holds packets that are undergoing GRO.
-type groBucket struct {
-	// mu protects the fields of a bucket.
-	mu sync.Mutex
-
-	// count is the number of packets in the bucket.
-	// +checklocks:mu
-	count int
-
-	// packets is the linked list of packets.
-	// +checklocks:mu
-	packets groPacketList
-
-	// packetsPrealloc and allocIdxs are used to preallocate and reuse
-	// groPacket structs and avoid allocation.
-	// +checklocks:mu
-	packetsPrealloc [groBucketSize]groPacket
-
-	// +checklocks:mu
-	allocIdxs [groBucketSize]int
-}
-
-// +checklocks:gb.mu
-func (gb *groBucket) full() bool {
-	return gb.count == groBucketSize
-}
-
-// insert inserts pkt into the bucket.
-// +checklocks:gb.mu
-func (gb *groBucket) insert(pkt PacketBufferPtr, ipHdr []byte, tcpHdr header.TCP, ep NetworkEndpoint) {
-	groPkt := &gb.packetsPrealloc[gb.allocIdxs[gb.count]]
-	*groPkt = groPacket{
-		pkt:           pkt,
-		created:       time.Now(),
-		ep:            ep,
-		ipHdr:         ipHdr,
-		tcpHdr:        tcpHdr,
-		initialLength: pkt.Data().Size(), // pkt.Data() contains network header.
-		idx:           groPkt.idx,
-	}
-	gb.count++
-	gb.packets.PushBack(groPkt)
-}
-
-// removeOldest removes the oldest packet from gb and returns the contained
-// PacketBufferPtr. gb must not be empty.
-// +checklocks:gb.mu
-func (gb *groBucket) removeOldest() PacketBufferPtr {
-	pkt := gb.packets.Front()
-	gb.packets.Remove(pkt)
-	gb.count--
-	gb.allocIdxs[gb.count] = pkt.idx
-	ret := pkt.pkt
-	pkt.reset()
-	return ret
-}
-
-// removeOne removes a packet from gb. It also resets pkt to its zero value.
-// +checklocks:gb.mu
-func (gb *groBucket) removeOne(pkt *groPacket) {
-	gb.packets.Remove(pkt)
-	gb.count--
-	gb.allocIdxs[gb.count] = pkt.idx
-	pkt.reset()
-}
-
-// findGROPacket4 returns the groPkt that matches ipHdr and tcpHdr, or nil if
-// none exists. It also returns whether the groPkt should be flushed based on
-// differences between the two headers.
-// +checklocks:gb.mu
-func (gb *groBucket) findGROPacket4(pkt PacketBufferPtr, ipHdr header.IPv4, tcpHdr header.TCP, ep NetworkEndpoint) (*groPacket, bool) {
-	for groPkt := gb.packets.Front(); groPkt != nil; groPkt = groPkt.Next() {
-		// Do the addresses match?
-		groIPHdr := header.IPv4(groPkt.ipHdr)
-		if ipHdr.SourceAddress() != groIPHdr.SourceAddress() || ipHdr.DestinationAddress() != groIPHdr.DestinationAddress() {
-			continue
-		}
-
-		// Do the ports match?
-		if tcpHdr.SourcePort() != groPkt.tcpHdr.SourcePort() || tcpHdr.DestinationPort() != groPkt.tcpHdr.DestinationPort() {
-			continue
-		}
-
-		// We've found a packet of the same flow.
-
-		// IP checks.
-		TOS, _ := ipHdr.TOS()
-		groTOS, _ := groIPHdr.TOS()
-		if ipHdr.TTL() != groIPHdr.TTL() || TOS != groTOS {
-			return groPkt, true
-		}
-
-		// TCP checks.
-		if shouldFlushTCP(groPkt, tcpHdr) {
-			return groPkt, true
-		}
-
-		// There's an upper limit on coalesced packet size.
-		if pkt.Data().Size()-header.IPv4MinimumSize-int(tcpHdr.DataOffset())+groPkt.pkt.Data().Size() >= groMaxPacketSize {
-			return groPkt, true
-		}
-
-		return groPkt, false
-	}
-
-	return nil, false
-}
-
-// findGROPacket6 returns the groPkt that matches ipHdr and tcpHdr, or nil if
-// none exists. It also returns whether the groPkt should be flushed based on
-// differences between the two headers.
-// +checklocks:gb.mu
-func (gb *groBucket) findGROPacket6(pkt PacketBufferPtr, ipHdr header.IPv6, tcpHdr header.TCP, ep NetworkEndpoint) (*groPacket, bool) {
-	for groPkt := gb.packets.Front(); groPkt != nil; groPkt = groPkt.Next() {
-		// Do the addresses match?
-		groIPHdr := header.IPv6(groPkt.ipHdr)
-		if ipHdr.SourceAddress() != groIPHdr.SourceAddress() || ipHdr.DestinationAddress() != groIPHdr.DestinationAddress() {
-			continue
-		}
-
-		// Need to check that headers are the same except:
-		// - Traffic class, a difference of which causes a flush.
-		// - Hop limit, a difference of which causes a flush.
-		// - Length, which is checked later.
-		// - Version, which is checked by an earlier call to IsValid().
-		trafficClass, flowLabel := ipHdr.TOS()
-		groTrafficClass, groFlowLabel := groIPHdr.TOS()
-		if flowLabel != groFlowLabel || ipHdr.NextHeader() != groIPHdr.NextHeader() {
-			continue
-		}
-		// Unlike IPv4, IPv6 packets with extension headers can be coalesced.
-		if !bytes.Equal(ipHdr[header.IPv6MinimumSize:], groIPHdr[header.IPv6MinimumSize:]) {
-			continue
-		}
-
-		// Do the ports match?
-		if tcpHdr.SourcePort() != groPkt.tcpHdr.SourcePort() || tcpHdr.DestinationPort() != groPkt.tcpHdr.DestinationPort() {
-			continue
-		}
-
-		// We've found a packet of the same flow.
-
-		// TCP checks.
-		if shouldFlushTCP(groPkt, tcpHdr) {
-			return groPkt, true
-		}
-
-		// Do the traffic class and hop limit match?
-		if trafficClass != groTrafficClass || ipHdr.HopLimit() != groIPHdr.HopLimit() {
-			return groPkt, true
-		}
-
-		// This limit is artificial for IPv6 -- we could allow even
-		// larger packets via jumbograms.
-		if pkt.Data().Size()-len(ipHdr)-int(tcpHdr.DataOffset())+groPkt.pkt.Data().Size() >= groMaxPacketSize {
-			return groPkt, true
-		}
-
-		return groPkt, false
-	}
-
-	return nil, false
-}
-
-// +checklocks:gb.mu
-func (gb *groBucket) found(gd *groDispatcher, groPkt *groPacket, flushGROPkt bool, pkt PacketBufferPtr, ipHdr []byte, tcpHdr header.TCP, ep NetworkEndpoint, updateIPHdr func([]byte, int)) {
-	// Flush groPkt or merge the packets.
-	pktSize := pkt.Data().Size()
-	flags := tcpHdr.Flags()
-	dataOff := tcpHdr.DataOffset()
-	tcpPayloadSize := pkt.Data().Size() - len(ipHdr) - int(dataOff)
-	if flushGROPkt {
-		// Flush the existing GRO packet. Don't hold bucket.mu while
-		// processing the packet.
-		pkt := groPkt.pkt
-		gb.removeOne(groPkt)
-		gb.mu.Unlock()
-		ep.HandlePacket(pkt)
-		pkt.DecRef()
-		gb.mu.Lock()
-		groPkt = nil
-	} else if groPkt != nil {
-		// Merge pkt into the GRO packet.
-		pkt.Data().TrimFront(len(ipHdr) + int(dataOff))
-		groPkt.pkt.Data().Merge(pkt.Data())
-		// Update the IP total length.
-		updateIPHdr(groPkt.ipHdr, tcpPayloadSize)
-		// Add flags from the packet to the GRO packet.
-		groPkt.tcpHdr.SetFlags(uint8(groPkt.tcpHdr.Flags() | (flags & (header.TCPFlagFin | header.TCPFlagPsh))))
-
-		pkt = nil
-	}
-
-	// Flush if the packet isn't the same size as the previous packets or
-	// if certain flags are set. The reason for checking size equality is:
-	// - If the packet is smaller than the others, this is likely the end
-	//   of some message. Peers will send MSS-sized packets until they have
-	//   insufficient data to do so.
-	// - If the packet is larger than the others, this packet is either
-	//   malformed, a local GSO packet, or has already been handled by host
-	//   GRO.
-	flush := header.TCPFlags(flags)&(header.TCPFlagUrg|header.TCPFlagPsh|header.TCPFlagRst|header.TCPFlagSyn|header.TCPFlagFin) != 0
-	flush = flush || tcpPayloadSize == 0
-	if groPkt != nil {
-		flush = flush || pktSize != groPkt.initialLength
-	}
-
-	switch {
-	case flush && groPkt != nil:
-		// A merge occurred and we need to flush groPkt.
-		pkt := groPkt.pkt
-		gb.removeOne(groPkt)
-		gb.mu.Unlock()
-		ep.HandlePacket(pkt)
-		pkt.DecRef()
-	case flush && groPkt == nil:
-		// No merge occurred and the incoming packet needs to be flushed.
-		gb.mu.Unlock()
-		ep.HandlePacket(pkt)
-	case !flush && groPkt == nil:
-		// New flow and we don't need to flush. Insert pkt into GRO.
-		if gb.full() {
-			// Head is always the oldest packet
-			toFlush := gb.removeOldest()
-			gb.insert(pkt.IncRef(), ipHdr, tcpHdr, ep)
-			gb.mu.Unlock()
-			ep.HandlePacket(toFlush)
-			toFlush.DecRef()
-		} else {
-			gb.insert(pkt.IncRef(), ipHdr, tcpHdr, ep)
-			gb.mu.Unlock()
-		}
-	default:
-		// A merge occurred and we don't need to flush anything.
-		gb.mu.Unlock()
-	}
-
-	// Schedule a timer if we never had one set before.
-	if gd.flushTimerState.CompareAndSwap(flushTimerUnset, flushTimerSet) {
-		gd.flushTimer.Reset(gd.getInterval())
-	}
-}
-
-// A groPacket is a packet undergoing GRO. It may be several packets coalesced
-// together.
-type groPacket struct {
-	// groPacketEntry is an intrusive list.
-	groPacketEntry
-
-	// pkt is the coalesced packet.
-	pkt PacketBufferPtr
-
-	// ipHdr is the IP (v4 or v6) header for the coalesced packet.
-	ipHdr []byte
-
-	// tcpHdr is the TCP header for the coalesced packet.
-	tcpHdr header.TCP
-
-	// created is when the packet was received.
-	created time.Time
-
-	// ep is the endpoint to which the packet will be sent after GRO.
-	ep NetworkEndpoint
-
-	// initialLength is the length of the first packet in the flow. It is
-	// used as a best-effort guess at MSS: senders will send MSS-sized
-	// packets until they run out of data, so we coalesce as long as
-	// packets are the same size.
-	initialLength int
-
-	// idx is the groPacket's index in its bucket packetsPrealloc. It is
-	// immutable.
-	idx int
-}
-
-// reset resets all mutable fields of the groPacket.
-func (pk *groPacket) reset() {
-	*pk = groPacket{
-		idx: pk.idx,
-	}
-}
-
-// payloadSize is the payload size of the coalesced packet, which does not
-// include the network or transport headers.
-func (pk *groPacket) payloadSize() int {
-	return pk.pkt.Data().Size() - len(pk.ipHdr) - int(pk.tcpHdr.DataOffset())
-}
-
-// Values held in groDispatcher.flushTimerState.
-const (
-	flushTimerUnset = iota
-	flushTimerSet
-	flushTimerClosed
-)
-
-// groDispatcher coalesces incoming packets to increase throughput.
-type groDispatcher struct {
-	// intervalNS is the interval in nanoseconds.
-	intervalNS atomicbitops.Int64
-
-	buckets [groNBuckets]groBucket
-
-	flushTimerState atomicbitops.Int32
-	flushTimer      *time.Timer
-}
-
-func (gd *groDispatcher) init(interval time.Duration) {
-	gd.intervalNS.Store(interval.Nanoseconds())
-
-	for i := range gd.buckets {
-		bucket := &gd.buckets[i]
-		bucket.mu.Lock()
-		for j := range bucket.packetsPrealloc {
-			bucket.allocIdxs[j] = j
-			bucket.packetsPrealloc[j].idx = j
-		}
-		bucket.mu.Unlock()
-	}
-
-	// Create a timer to fire far from now and cancel it immediately.
-	//
-	// The timer will be reset when there is a need for it to fire.
-	gd.flushTimer = time.AfterFunc(time.Hour, func() {
-		if !gd.flushTimerState.CompareAndSwap(flushTimerSet, flushTimerUnset) {
-			// Timer was unset or GRO is closed, do nothing further.
-			return
-		}
-
-		interval := gd.getInterval()
-		if interval == 0 {
-			gd.flushAll()
-			return
-		}
-
-		if gd.flush() && gd.flushTimerState.CompareAndSwap(flushTimerUnset, flushTimerSet) {
-			// Only reset the timer if we have more packets and the timer was
-			// previously unset. If no packets remain, the timer is already set,
-			// or GRO is being closed, do not reset the timer.
-			gd.flushTimer.Reset(interval)
-		}
-	})
-	gd.flushTimer.Stop()
-}
-
-func (gd *groDispatcher) getInterval() time.Duration {
-	return time.Duration(gd.intervalNS.Load()) * time.Nanosecond
-}
-
-// setInterval is not thread-safe and so must be protected by callers.
-func (gd *groDispatcher) setInterval(interval time.Duration) {
-	gd.intervalNS.Store(interval.Nanoseconds())
-
-	if gd.flushTimerState.Load() == flushTimerSet {
-		// Timer was previously set, reset it.
-		gd.flushTimer.Reset(interval)
-	}
-}
-
-// dispatch sends pkt up the stack after it undergoes GRO coalescing.
-func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkProtocolNumber, ep NetworkEndpoint) {
-	// If GRO is disabled simply pass the packet along.
-	if gd.getInterval() == 0 {
-		ep.HandlePacket(pkt)
-		return
-	}
-
-	switch netProto {
-	case header.IPv4ProtocolNumber:
-		gd.dispatch4(pkt, ep)
-	case header.IPv6ProtocolNumber:
-		gd.dispatch6(pkt, ep)
-	default:
-		// We can't GRO this.
-		ep.HandlePacket(pkt)
-	}
-}
-
-func (gd *groDispatcher) dispatch4(pkt PacketBufferPtr, ep NetworkEndpoint) {
-	// Immediately get the IPv4 and TCP headers. We need a way to hash the
-	// packet into its bucket, which requires addresses and ports. Linux
-	// simply gets a hash passed by hardware, but we're not so lucky.
-
-	// We only GRO TCP packets. The check for the transport protocol number
-	// is done below so that we can PullUp both the IP and TCP headers
-	// together.
-	hdrBytes, ok := pkt.Data().PullUp(header.IPv4MinimumSize + header.TCPMinimumSize)
-	if !ok {
-		ep.HandlePacket(pkt)
-		return
-	}
-	ipHdr := header.IPv4(hdrBytes)
-
-	// We don't handle fragments. That should be the vast majority of
-	// traffic, and simplifies handling.
-	if ipHdr.FragmentOffset() != 0 || ipHdr.Flags()&header.IPv4FlagMoreFragments != 0 {
-		ep.HandlePacket(pkt)
-		return
-	}
-
-	// We only handle TCP packets without IP options.
-	if ipHdr.HeaderLength() != header.IPv4MinimumSize || tcpip.TransportProtocolNumber(ipHdr.Protocol()) != header.TCPProtocolNumber {
-		ep.HandlePacket(pkt)
-		return
-	}
-	tcpHdr := header.TCP(hdrBytes[header.IPv4MinimumSize:])
-	ipHdr = ipHdr[:header.IPv4MinimumSize]
-	dataOff := tcpHdr.DataOffset()
-	if dataOff < header.TCPMinimumSize {
-		// Malformed packet: will be handled further up the stack.
-		ep.HandlePacket(pkt)
-		return
-	}
-	hdrBytes, ok = pkt.Data().PullUp(header.IPv4MinimumSize + int(dataOff))
-	if !ok {
-		// Malformed packet: will be handled further up the stack.
-		ep.HandlePacket(pkt)
-		return
-	}
-
-	tcpHdr = header.TCP(hdrBytes[header.IPv4MinimumSize:])
-
-	// If either checksum is bad, flush the packet. Since we don't know
-	// what bits were flipped, we can't identify this packet with a flow.
-	if !pkt.RXChecksumValidated {
-		if !ipHdr.IsValid(pkt.Data().Size()) || !ipHdr.IsChecksumValid() {
-			ep.HandlePacket(pkt)
-			return
-		}
-		payloadChecksum := pkt.Data().ChecksumAtOffset(header.IPv4MinimumSize + int(dataOff))
-		tcpPayloadSize := pkt.Data().Size() - header.IPv4MinimumSize - int(dataOff)
-		if !tcpHdr.IsChecksumValid(ipHdr.SourceAddress(), ipHdr.DestinationAddress(), payloadChecksum, uint16(tcpPayloadSize)) {
-			ep.HandlePacket(pkt)
-			return
-		}
-		// We've validated the checksum, no reason for others to do it
-		// again.
-		pkt.RXChecksumValidated = true
-	}
-
-	// Now we can get the bucket for the packet.
-	bucket := &gd.buckets[gd.bucketForPacket(ipHdr, tcpHdr)&groNBucketsMask]
-	bucket.mu.Lock()
-	groPkt, flushGROPkt := bucket.findGROPacket4(pkt, ipHdr, tcpHdr, ep)
-	bucket.found(gd, groPkt, flushGROPkt, pkt, ipHdr, tcpHdr, ep, updateIPv4Hdr)
-}
-
-func (gd *groDispatcher) dispatch6(pkt PacketBufferPtr, ep NetworkEndpoint) {
-	// Immediately get the IPv6 and TCP headers. We need a way to hash the
-	// packet into its bucket, which requires addresses and ports. Linux
-	// simply gets a hash passed by hardware, but we're not so lucky.
-
-	hdrBytes, ok := pkt.Data().PullUp(header.IPv6MinimumSize)
-	if !ok {
-		ep.HandlePacket(pkt)
-		return
-	}
-	ipHdr := header.IPv6(hdrBytes)
-
-	// Getting the IP header (+ extension headers) size is a bit of a pain
-	// on IPv6.
-	transProto := tcpip.TransportProtocolNumber(ipHdr.NextHeader())
-	buf := pkt.Data().ToBuffer()
-	buf.TrimFront(header.IPv6MinimumSize)
-	it := header.MakeIPv6PayloadIterator(header.IPv6ExtensionHeaderIdentifier(transProto), buf)
-	ipHdrSize := int(header.IPv6MinimumSize)
-	for {
-		transProto = tcpip.TransportProtocolNumber(it.NextHeaderIdentifier())
-		extHdr, done, err := it.Next()
-		if err != nil {
-			ep.HandlePacket(pkt)
-			return
-		}
-		if done {
-			break
-		}
-		switch extHdr.(type) {
-		// We can GRO these, so just skip over them.
-		case header.IPv6HopByHopOptionsExtHdr:
-		case header.IPv6RoutingExtHdr:
-		case header.IPv6DestinationOptionsExtHdr:
-		default:
-			// This is either a TCP header or something we can't handle.
-			ipHdrSize = int(it.HeaderOffset())
-			done = true
-		}
-		extHdr.Release()
-		if done {
-			break
-		}
-	}
-
-	hdrBytes, ok = pkt.Data().PullUp(ipHdrSize + header.TCPMinimumSize)
-	if !ok {
-		ep.HandlePacket(pkt)
-		return
-	}
-	ipHdr = header.IPv6(hdrBytes[:ipHdrSize])
-
-	// We only handle TCP packets.
-	if transProto != header.TCPProtocolNumber {
-		ep.HandlePacket(pkt)
-		return
-	}
-	tcpHdr := header.TCP(hdrBytes[ipHdrSize:])
-	dataOff := tcpHdr.DataOffset()
-	if dataOff < header.TCPMinimumSize {
-		// Malformed packet: will be handled further up the stack.
-		ep.HandlePacket(pkt)
-		return
-	}
-
-	hdrBytes, ok = pkt.Data().PullUp(ipHdrSize + int(dataOff))
-	if !ok {
-		// Malformed packet: will be handled further up the stack.
-		ep.HandlePacket(pkt)
-		return
-	}
-	tcpHdr = header.TCP(hdrBytes[ipHdrSize:])
-
-	// If either checksum is bad, flush the packet. Since we don't know
-	// what bits were flipped, we can't identify this packet with a flow.
-	if !pkt.RXChecksumValidated {
-		if !ipHdr.IsValid(pkt.Data().Size()) {
-			ep.HandlePacket(pkt)
-			return
-		}
-		payloadChecksum := pkt.Data().ChecksumAtOffset(ipHdrSize + int(dataOff))
-		tcpPayloadSize := pkt.Data().Size() - ipHdrSize - int(dataOff)
-		if !tcpHdr.IsChecksumValid(ipHdr.SourceAddress(), ipHdr.DestinationAddress(), payloadChecksum, uint16(tcpPayloadSize)) {
-			ep.HandlePacket(pkt)
-			return
-		}
-		// We've validated the checksum, no reason for others to do it
-		// again.
-		pkt.RXChecksumValidated = true
-	}
-
-	// Now we can get the bucket for the packet.
-	bucket := &gd.buckets[gd.bucketForPacket(ipHdr, tcpHdr)&groNBucketsMask]
-	bucket.mu.Lock()
-	groPkt, flushGROPkt := bucket.findGROPacket6(pkt, ipHdr, tcpHdr, ep)
-	bucket.found(gd, groPkt, flushGROPkt, pkt, ipHdr, tcpHdr, ep, updateIPv6Hdr)
-}
-
-func (gd *groDispatcher) bucketForPacket(ipHdr header.Network, tcpHdr header.TCP) int {
-	// TODO(b/256037250): Use jenkins or checksum. Write a test to print
-	// distribution.
-	var sum int
-	srcAddr := ipHdr.SourceAddress()
-	for _, val := range srcAddr.AsSlice() {
-		sum += int(val)
-	}
-	dstAddr := ipHdr.DestinationAddress()
-	for _, val := range dstAddr.AsSlice() {
-		sum += int(val)
-	}
-	sum += int(tcpHdr.SourcePort())
-	sum += int(tcpHdr.DestinationPort())
-	return sum
-}
-
-// flush sends any packets older than interval up the stack.
-//
-// Returns true iff packets remain.
-func (gd *groDispatcher) flush() bool {
-	interval := gd.intervalNS.Load()
-	old := time.Now().Add(-time.Duration(interval) * time.Nanosecond)
-	return gd.flushSinceOrEqualTo(old)
-}
-
-// flushSinceOrEqualTo sends any packets older than or equal to the specified
-// time.
-//
-// Returns true iff packets remain.
-func (gd *groDispatcher) flushSinceOrEqualTo(old time.Time) bool {
-	type pair struct {
-		pkt PacketBufferPtr
-		ep  NetworkEndpoint
-	}
-
-	hasMore := false
-
-	for i := range gd.buckets {
-		// Put packets in a slice so we don't have to hold bucket.mu
-		// when we call HandlePacket.
-		var pairsBacking [groNBuckets]pair
-		pairs := pairsBacking[:0]
-
-		bucket := &gd.buckets[i]
-		bucket.mu.Lock()
-		for groPkt := bucket.packets.Front(); groPkt != nil; groPkt = groPkt.Next() {
-			if groPkt.created.After(old) {
-				// Packets are ordered by age, so we can move
-				// on once we find one that's too new.
-				hasMore = true
-				break
-			} else {
-				pairs = append(pairs, pair{groPkt.pkt, groPkt.ep})
-				bucket.removeOne(groPkt)
-			}
-		}
-		bucket.mu.Unlock()
-
-		for _, pair := range pairs {
-			pair.ep.HandlePacket(pair.pkt)
-			pair.pkt.DecRef()
-		}
-	}
-
-	return hasMore
-}
-
-func (gd *groDispatcher) flushAll() {
-	if gd.flushSinceOrEqualTo(time.Now()) {
-		panic("packets unexpectedly remain after flushing all")
-	}
-}
-
-// close stops the GRO goroutine and releases any held packets.
-func (gd *groDispatcher) close() {
-	gd.flushTimer.Stop()
-	// Prevent the timer from being scheduled again.
-	gd.flushTimerState.Store(flushTimerClosed)
-
-	for i := range gd.buckets {
-		bucket := &gd.buckets[i]
-		bucket.mu.Lock()
-		for groPkt := bucket.packets.Front(); groPkt != nil; groPkt = bucket.packets.Front() {
-			groPkt.pkt.DecRef()
-			bucket.removeOne(groPkt)
-		}
-		bucket.mu.Unlock()
-	}
-}
-
-// String implements fmt.Stringer.
-func (gd *groDispatcher) String() string {
-	ret := "GRO state: \n"
-	for i := range gd.buckets {
-		bucket := &gd.buckets[i]
-		bucket.mu.Lock()
-		ret += fmt.Sprintf("bucket %d: %d packets: ", i, bucket.count)
-		for groPkt := bucket.packets.Front(); groPkt != nil; groPkt = groPkt.Next() {
-			ret += fmt.Sprintf("%s (%d), ", groPkt.created, groPkt.pkt.Data().Size())
-		}
-		ret += "\n"
-		bucket.mu.Unlock()
-	}
-	return ret
-}
-
-// shouldFlushTCP returns whether the TCP headers indicate that groPkt should
-// be flushed.
-func shouldFlushTCP(groPkt *groPacket, tcpHdr header.TCP) bool {
-	flags := tcpHdr.Flags()
-	groPktFlags := groPkt.tcpHdr.Flags()
-	dataOff := tcpHdr.DataOffset()
-	if flags&header.TCPFlagCwr != 0 || // Is congestion control occurring?
-		(flags^groPktFlags)&^(header.TCPFlagCwr|header.TCPFlagFin|header.TCPFlagPsh) != 0 || // Do the flags differ besides CWR, FIN, and PSH?
-		tcpHdr.AckNumber() != groPkt.tcpHdr.AckNumber() || // Do the ACKs match?
-		dataOff != groPkt.tcpHdr.DataOffset() || // Are the TCP headers the same length?
-		groPkt.tcpHdr.SequenceNumber()+uint32(groPkt.payloadSize()) != tcpHdr.SequenceNumber() { // Does the incoming packet match the expected sequence number?
-		return true
-	}
-	// The options, including timestamps, must be identical.
-	return !bytes.Equal(tcpHdr[header.TCPMinimumSize:], groPkt.tcpHdr[header.TCPMinimumSize:])
-}
-
-func updateIPv4Hdr(ipHdrBytes []byte, newBytes int) {
-	ipHdr := header.IPv4(ipHdrBytes)
-	ipHdr.SetTotalLength(ipHdr.TotalLength() + uint16(newBytes))
-}
-
-func updateIPv6Hdr(ipHdrBytes []byte, newBytes int) {
-	ipHdr := header.IPv6(ipHdrBytes)
-	ipHdr.SetPayloadLength(ipHdr.PayloadLength() + uint16(newBytes))
-}
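
The removed groDispatcher above arms its flush timer through a three-state
compare-and-swap guard (flushTimerUnset/Set/Closed) so that concurrent
dispatch paths never double-arm the timer and a closed dispatcher never
re-arms it. Below is a minimal standalone sketch of just that pattern using
the standard library; the flusher type and its method names are illustrative
and are not part of gvisor.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// Timer states, mirroring the unset/set/closed scheme above.
const (
	timerUnset int32 = iota
	timerSet
	timerClosed
)

type flusher struct {
	state atomic.Int32
	timer *time.Timer
}

func newFlusher(flush func()) *flusher {
	f := &flusher{}
	// Create the timer disarmed; it is only reset when work arrives.
	f.timer = time.AfterFunc(time.Hour, func() {
		// Move set -> unset; if the flusher was closed, do nothing.
		if !f.state.CompareAndSwap(timerSet, timerUnset) {
			return
		}
		flush()
	})
	f.timer.Stop()
	return f
}

// schedule arms the timer only if nobody else has armed it already.
func (f *flusher) schedule(interval time.Duration) {
	if f.state.CompareAndSwap(timerUnset, timerSet) {
		f.timer.Reset(interval)
	}
}

// close stops the timer and prevents any further scheduling.
func (f *flusher) close() {
	f.timer.Stop()
	f.state.Store(timerClosed)
}

func main() {
	f := newFlusher(func() { fmt.Println("flush") })
	f.schedule(10 * time.Millisecond)
	time.Sleep(50 * time.Millisecond)
	f.close()
}
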
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/icmp_rate_limit.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/icmp_rate_limit.go
index 99e5d2df..560543db 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/icmp_rate_limit.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/icmp_rate_limit.go
@@ -31,8 +31,11 @@ const (
 
 // ICMPRateLimiter is a global rate limiter that controls the generation of
 // ICMP messages generated by the stack.
+//
+// +stateify savable
 type ICMPRateLimiter struct {
-	limiter *rate.Limiter
+	// TODO(b/341946753): Restore when netstack is savable.
+	limiter *rate.Limiter `state:"nosave"`
 	clock   tcpip.Clock
 }
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go
index 9efeb595..a28ea90c 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go
@@ -15,8 +15,10 @@
 package stack
 
 import (
+	"context"
 	"fmt"
 	"math/rand"
+	"reflect"
 	"time"
 
 	"gvisor.dev/gvisor/pkg/tcpip"
@@ -48,11 +50,11 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables {
 		v4Tables: [NumTables]Table{
 			NATID: {
 				Rules: []Rule{
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
-					{Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
 				},
 				BuiltinChains: [NumHooks]int{
 					Prerouting:  0,
@@ -71,9 +73,9 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables {
 			},
 			MangleID: {
 				Rules: []Rule{
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
-					{Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
 				},
 				BuiltinChains: [NumHooks]int{
 					Prerouting: 0,
@@ -89,10 +91,10 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables {
 			},
 			FilterID: {
 				Rules: []Rule{
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
-					{Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+					{Filter: EmptyFilter4(), Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
 				},
 				BuiltinChains: [NumHooks]int{
 					Prerouting:  HookUnset,
@@ -113,11 +115,11 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables {
 		v6Tables: [NumTables]Table{
 			NATID: {
 				Rules: []Rule{
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
-					{Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
 				},
 				BuiltinChains: [NumHooks]int{
 					Prerouting:  0,
@@ -136,9 +138,9 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables {
 			},
 			MangleID: {
 				Rules: []Rule{
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
-					{Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
 				},
 				BuiltinChains: [NumHooks]int{
 					Prerouting: 0,
@@ -154,10 +156,10 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables {
 			},
 			FilterID: {
 				Rules: []Rule{
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
-					{Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
-					{Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
+					{Filter: EmptyFilter6(), Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}},
 				},
 				BuiltinChains: [NumHooks]int{
 					Prerouting:  HookUnset,
@@ -232,11 +234,29 @@ func (it *IPTables) getTableRLocked(id TableID, ipv6 bool) Table {
 // ReplaceTable replaces or inserts table by name. It panics when an invalid id
 // is provided.
 func (it *IPTables) ReplaceTable(id TableID, table Table, ipv6 bool) {
+	it.replaceTable(id, table, ipv6, false /* force */)
+}
+
+// ForceReplaceTable replaces or inserts table by name. It panics when an invalid id
+// is provided. It enables iptables even when the inserted table is all
+// conditionless ACCEPT, skipping our optimization that disables iptables until
+// they're modified.
+func (it *IPTables) ForceReplaceTable(id TableID, table Table, ipv6 bool) {
+	it.replaceTable(id, table, ipv6, true /* force */)
+}
+
+func (it *IPTables) replaceTable(id TableID, table Table, ipv6, force bool) {
 	it.mu.Lock()
 	defer it.mu.Unlock()
+
 	// If iptables is being enabled, initialize the conntrack table and
 	// reaper.
 	if !it.modified {
+		// Don't do anything if the table is identical.
+		if ((ipv6 && reflect.DeepEqual(table, it.v6Tables[id])) || (!ipv6 && reflect.DeepEqual(table, it.v4Tables[id]))) && !force {
+			return
+		}
+
 		it.connections.init()
 		it.startReaper(reaperDelay)
 	}
@@ -281,7 +301,7 @@ type checkTable struct {
 //   - Calls to dynamic functions, which can allocate.
 //
 // +checkescape:hard
-func (it *IPTables) shouldSkipOrPopulateTables(tables []checkTable, pkt PacketBufferPtr) bool {
+func (it *IPTables) shouldSkipOrPopulateTables(tables []checkTable, pkt *PacketBuffer) bool {
 	switch pkt.NetworkProtocolNumber {
 	case header.IPv4ProtocolNumber, header.IPv6ProtocolNumber:
 	default:
@@ -316,7 +336,7 @@ func (it *IPTables) shouldSkipOrPopulateTables(tables []checkTable, pkt PacketBu
 // that it does not allocate. Note that called functions (e.g.
 // getConnAndUpdate) can allocate.
 // TODO(b/233951539): checkescape fails on arm sometimes. Fix and re-add.
-func (it *IPTables) CheckPrerouting(pkt PacketBufferPtr, addressEP AddressableEndpoint, inNicName string) bool {
+func (it *IPTables) CheckPrerouting(pkt *PacketBuffer, addressEP AddressableEndpoint, inNicName string) bool {
 	tables := [...]checkTable{
 		{
 			fn:      check,
@@ -354,7 +374,7 @@ func (it *IPTables) CheckPrerouting(pkt PacketBufferPtr, addressEP AddressableEn
 // that it does not allocate. Note that called functions (e.g.
 // getConnAndUpdate) can allocate.
 // TODO(b/233951539): checkescape fails on arm sometimes. Fix and re-add.
-func (it *IPTables) CheckInput(pkt PacketBufferPtr, inNicName string) bool {
+func (it *IPTables) CheckInput(pkt *PacketBuffer, inNicName string) bool {
 	tables := [...]checkTable{
 		{
 			fn:      checkNAT,
@@ -394,7 +414,7 @@ func (it *IPTables) CheckInput(pkt PacketBufferPtr, inNicName string) bool {
 // that it does not allocate. Note that called functions (e.g.
 // getConnAndUpdate) can allocate.
 // TODO(b/233951539): checkescape fails on arm sometimes. Fix and re-add.
-func (it *IPTables) CheckForward(pkt PacketBufferPtr, inNicName, outNicName string) bool {
+func (it *IPTables) CheckForward(pkt *PacketBuffer, inNicName, outNicName string) bool {
 	tables := [...]checkTable{
 		{
 			fn:      check,
@@ -426,7 +446,7 @@ func (it *IPTables) CheckForward(pkt PacketBufferPtr, inNicName, outNicName stri
 // that it does not allocate. Note that called functions (e.g.
 // getConnAndUpdate) can allocate.
 // TODO(b/233951539): checkescape fails on arm sometimes. Fix and re-add.
-func (it *IPTables) CheckOutput(pkt PacketBufferPtr, r *Route, outNicName string) bool {
+func (it *IPTables) CheckOutput(pkt *PacketBuffer, r *Route, outNicName string) bool {
 	tables := [...]checkTable{
 		{
 			fn:      check,
@@ -470,7 +490,7 @@ func (it *IPTables) CheckOutput(pkt PacketBufferPtr, r *Route, outNicName string
 // that it does not allocate. Note that called functions (e.g.
 // getConnAndUpdate) can allocate.
 // TODO(b/233951539): checkescape fails on arm sometimes. Fix and re-add.
-func (it *IPTables) CheckPostrouting(pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, outNicName string) bool {
+func (it *IPTables) CheckPostrouting(pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, outNicName string) bool {
 	tables := [...]checkTable{
 		{
 			fn:      check,
@@ -501,16 +521,16 @@ func (it *IPTables) CheckPostrouting(pkt PacketBufferPtr, r *Route, addressEP Ad
 
 // Note: this used to omit the *IPTables parameter, but doing so caused
 // unnecessary allocations.
-type checkTableFn func(it *IPTables, table Table, hook Hook, pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool
+type checkTableFn func(it *IPTables, table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool
 
-func checkNAT(it *IPTables, table Table, hook Hook, pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {
+func checkNAT(it *IPTables, table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {
 	return it.checkNAT(table, hook, pkt, r, addressEP, inNicName, outNicName)
 }
 
 // checkNAT runs the packet through the NAT table.
 //
 // See check.
-func (it *IPTables) checkNAT(table Table, hook Hook, pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {
+func (it *IPTables) checkNAT(table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {
 	t := pkt.tuple
 	if t != nil && t.conn.handlePacket(pkt, hook, r) {
 		return true
@@ -541,14 +561,13 @@ func (it *IPTables) checkNAT(table Table, hook Hook, pkt PacketBufferPtr, r *Rou
 	//
 	// If the packet was already NATed, the connection must be NATed.
 	if !natDone {
-		t.conn.maybePerformNoopNAT(dnat)
-		_ = t.conn.handlePacket(pkt, hook, r)
+		t.conn.maybePerformNoopNAT(pkt, hook, r, dnat)
 	}
 
 	return true
 }
 
-func check(it *IPTables, table Table, hook Hook, pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {
+func check(it *IPTables, table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {
 	return it.check(table, hook, pkt, r, addressEP, inNicName, outNicName)
 }
 
@@ -557,7 +576,7 @@ func check(it *IPTables, table Table, hook Hook, pkt PacketBufferPtr, r *Route,
 // network stack or tables, or false when it must be dropped.
 //
 // Precondition: The packet's network and transport header must be set.
-func (it *IPTables) check(table Table, hook Hook, pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {
+func (it *IPTables) check(table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool {
 	ruleIdx := table.BuiltinChains[hook]
 	switch verdict := it.checkChain(hook, pkt, table, ruleIdx, r, addressEP, inNicName, outNicName); verdict {
 	// If the table returns Accept, move on to the next table.
@@ -594,7 +613,7 @@ func (it *IPTables) beforeSave() {
 }
 
 // afterLoad is invoked by stateify.
-func (it *IPTables) afterLoad() {
+func (it *IPTables) afterLoad(context.Context) {
 	it.startReaper(reaperDelay)
 }
 
@@ -610,7 +629,7 @@ func (it *IPTables) startReaper(interval time.Duration) {
 // Preconditions:
 //   - pkt is a IPv4 packet of at least length header.IPv4MinimumSize.
 //   - pkt.NetworkHeader is not nil.
-func (it *IPTables) checkChain(hook Hook, pkt PacketBufferPtr, table Table, ruleIdx int, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) chainVerdict {
+func (it *IPTables) checkChain(hook Hook, pkt *PacketBuffer, table Table, ruleIdx int, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) chainVerdict {
 	// Start from ruleIdx and walk the list of rules until a rule gives us
 	// a verdict.
 	for ruleIdx < len(table.Rules) {
@@ -660,7 +679,7 @@ func (it *IPTables) checkChain(hook Hook, pkt PacketBufferPtr, table Table, rule
 //
 // * pkt is a IPv4 packet of at least length header.IPv4MinimumSize.
 // * pkt.NetworkHeader is not nil.
-func (it *IPTables) checkRule(hook Hook, pkt PacketBufferPtr, table Table, ruleIdx int, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) (RuleVerdict, int) {
+func (it *IPTables) checkRule(hook Hook, pkt *PacketBuffer, table Table, ruleIdx int, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) (RuleVerdict, int) {
 	rule := table.Rules[ruleIdx]
 
 	// Check whether the packet matches the IP header filter.
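
The replaceTable change above only initializes conntrack and the reaper when
the caller installs a table that actually differs from the current one
(compared with reflect.DeepEqual), unless force is set as ForceReplaceTable
does. A toy sketch of that skip logic; the table type and replaceTable
signature here are simplified stand-ins rather than the real stack API.

package main

import (
	"fmt"
	"reflect"
)

// table is a stand-in for an iptables Table; only its shape matters here.
type table struct {
	Rules []string
}

// replaceTable runs the expensive setup (conntrack, reaper) only when the
// incoming table differs from the current one, unless force is set.
func replaceTable(current *table, incoming table, modified *bool, force bool) {
	if !*modified {
		if reflect.DeepEqual(incoming, *current) && !force {
			return // identical default table: leave iptables "disabled"
		}
		fmt.Println("initializing conntrack and reaper")
	}
	*modified = true
	*current = incoming
}

func main() {
	cur := table{Rules: []string{"ACCEPT"}}
	modified := false

	// Installing an identical table is a no-op and keeps iptables disabled.
	replaceTable(&cur, table{Rules: []string{"ACCEPT"}}, &modified, false)
	fmt.Println("modified:", modified) // false

	// A differing table enables iptables and records the modification.
	replaceTable(&cur, table{Rules: []string{"DROP"}}, &modified, false)
	fmt.Println("modified:", modified) // true
}
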
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_mutex.go
index 984498d1..9a2b97f0 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_mutex.go
@@ -17,7 +17,7 @@ type ipTablesRWMutex struct {
 var ipTableslockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type ipTableslockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go
index 4ba1f3e8..3ddc5d98 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go
@@ -24,31 +24,35 @@ import (
 )
 
 // AcceptTarget accepts packets.
+//
+// +stateify savable
 type AcceptTarget struct {
 	// NetworkProtocol is the network protocol the target is used with.
 	NetworkProtocol tcpip.NetworkProtocolNumber
 }
 
 // Action implements Target.Action.
-func (*AcceptTarget) Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) {
+func (*AcceptTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) {
 	return RuleAccept, 0
 }
 
 // DropTarget drops packets.
+//
+// +stateify savable
 type DropTarget struct {
 	// NetworkProtocol is the network protocol the target is used with.
 	NetworkProtocol tcpip.NetworkProtocolNumber
 }
 
 // Action implements Target.Action.
-func (*DropTarget) Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) {
+func (*DropTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) {
 	return RuleDrop, 0
 }
 
 // RejectIPv4WithHandler handles rejecting a packet.
 type RejectIPv4WithHandler interface {
 	// SendRejectionError sends an error packet in response to the packet.
-	SendRejectionError(pkt PacketBufferPtr, rejectWith RejectIPv4WithICMPType, inputHook bool) tcpip.Error
+	SendRejectionError(pkt *PacketBuffer, rejectWith RejectIPv4WithICMPType, inputHook bool) tcpip.Error
 }
 
 // RejectIPv4WithICMPType indicates the type of ICMP error that should be sent.
@@ -67,13 +71,15 @@ const (
 
 // RejectIPv4Target drops packets and sends back an error packet in response to the
 // matched packet.
+//
+// +stateify savable
 type RejectIPv4Target struct {
 	Handler    RejectIPv4WithHandler
 	RejectWith RejectIPv4WithICMPType
 }
 
 // Action implements Target.Action.
-func (rt *RejectIPv4Target) Action(pkt PacketBufferPtr, hook Hook, _ *Route, _ AddressableEndpoint) (RuleVerdict, int) {
+func (rt *RejectIPv4Target) Action(pkt *PacketBuffer, hook Hook, _ *Route, _ AddressableEndpoint) (RuleVerdict, int) {
 	switch hook {
 	case Input, Forward, Output:
 		// There is nothing reasonable for us to do in response to an error here;
@@ -90,7 +96,7 @@ func (rt *RejectIPv4Target) Action(pkt PacketBufferPtr, hook Hook, _ *Route, _ A
 // RejectIPv6WithHandler handles rejecting a packet.
 type RejectIPv6WithHandler interface {
 	// SendRejectionError sends an error packet in response to the packet.
-	SendRejectionError(pkt PacketBufferPtr, rejectWith RejectIPv6WithICMPType, forwardingHook bool) tcpip.Error
+	SendRejectionError(pkt *PacketBuffer, rejectWith RejectIPv6WithICMPType, forwardingHook bool) tcpip.Error
 }
 
 // RejectIPv6WithICMPType indicates the type of ICMP error that should be sent.
@@ -107,13 +113,15 @@ const (
 
 // RejectIPv6Target drops packets and sends back an error packet in response to the
 // matched packet.
+//
+// +stateify savable
 type RejectIPv6Target struct {
 	Handler    RejectIPv6WithHandler
 	RejectWith RejectIPv6WithICMPType
 }
 
 // Action implements Target.Action.
-func (rt *RejectIPv6Target) Action(pkt PacketBufferPtr, hook Hook, _ *Route, _ AddressableEndpoint) (RuleVerdict, int) {
+func (rt *RejectIPv6Target) Action(pkt *PacketBuffer, hook Hook, _ *Route, _ AddressableEndpoint) (RuleVerdict, int) {
 	switch hook {
 	case Input, Forward, Output:
 		// There is nothing reasonable for us to do in response to an error here;
@@ -129,18 +137,22 @@ func (rt *RejectIPv6Target) Action(pkt PacketBufferPtr, hook Hook, _ *Route, _ A
 
 // ErrorTarget logs an error and drops the packet. It represents a target that
 // should be unreachable.
+//
+// +stateify savable
 type ErrorTarget struct {
 	// NetworkProtocol is the network protocol the target is used with.
 	NetworkProtocol tcpip.NetworkProtocolNumber
 }
 
 // Action implements Target.Action.
-func (*ErrorTarget) Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) {
+func (*ErrorTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) {
 	log.Debugf("ErrorTarget triggered.")
 	return RuleDrop, 0
 }
 
 // UserChainTarget marks a rule as the beginning of a user chain.
+//
+// +stateify savable
 type UserChainTarget struct {
 	// Name is the chain name.
 	Name string
@@ -150,23 +162,27 @@ type UserChainTarget struct {
 }
 
 // Action implements Target.Action.
-func (*UserChainTarget) Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) {
+func (*UserChainTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) {
 	panic("UserChainTarget should never be called.")
 }
 
 // ReturnTarget returns from the current chain. If the chain is a built-in, the
 // hook's underflow should be called.
+//
+// +stateify savable
 type ReturnTarget struct {
 	// NetworkProtocol is the network protocol the target is used with.
 	NetworkProtocol tcpip.NetworkProtocolNumber
 }
 
 // Action implements Target.Action.
-func (*ReturnTarget) Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) {
+func (*ReturnTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) {
 	return RuleReturn, 0
 }
 
 // DNATTarget modifies the destination port/IP of packets.
+//
+// +stateify savable
 type DNATTarget struct {
 	// The new destination address for packets.
 	//
@@ -182,10 +198,20 @@ type DNATTarget struct {
 	//
 	// Immutable.
 	NetworkProtocol tcpip.NetworkProtocolNumber
+
+	// ChangeAddress indicates whether we should check addresses.
+	//
+	// Immutable.
+	ChangeAddress bool
+
+	// ChangePort indicates whether we should check ports.
+	//
+	// Immutable.
+	ChangePort bool
 }
 
 // Action implements Target.Action.
-func (rt *DNATTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) {
+func (rt *DNATTarget) Action(pkt *PacketBuffer, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) {
 	// Sanity check.
 	if rt.NetworkProtocol != pkt.NetworkProtocolNumber {
 		panic(fmt.Sprintf(
@@ -201,7 +227,7 @@ func (rt *DNATTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addressEP
 		panic(fmt.Sprintf("%s unrecognized", hook))
 	}
 
-	return dnatAction(pkt, hook, r, rt.Port, rt.Addr)
+	return dnatAction(pkt, hook, r, rt.Port, rt.Addr, rt.ChangePort, rt.ChangeAddress)
 
 }
 
@@ -209,6 +235,8 @@ func (rt *DNATTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addressEP
 // destination port/IP. Outgoing packets are redirected to the loopback device,
 // and incoming packets are redirected to the incoming interface (rather than
 // forwarded).
+//
+// +stateify savable
 type RedirectTarget struct {
 	// Port indicates port used to redirect. It is immutable.
 	Port uint16
@@ -219,7 +247,7 @@ type RedirectTarget struct {
 }
 
 // Action implements Target.Action.
-func (rt *RedirectTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) {
+func (rt *RedirectTarget) Action(pkt *PacketBuffer, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) {
 	// Sanity check.
 	if rt.NetworkProtocol != pkt.NetworkProtocolNumber {
 		panic(fmt.Sprintf(
@@ -244,10 +272,12 @@ func (rt *RedirectTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addre
 		panic("redirect target is supported only on output and prerouting hooks")
 	}
 
-	return dnatAction(pkt, hook, r, rt.Port, address)
+	return dnatAction(pkt, hook, r, rt.Port, address, true /* changePort */, true /* changeAddress */)
 }
 
 // SNATTarget modifies the source port/IP in the outgoing packets.
+//
+// +stateify savable
 type SNATTarget struct {
 	Addr tcpip.Address
 	Port uint16
@@ -255,10 +285,20 @@ type SNATTarget struct {
 	// NetworkProtocol is the network protocol the target is used with. It
 	// is immutable.
 	NetworkProtocol tcpip.NetworkProtocolNumber
+
+	// ChangeAddress indicates whether we should check addresses.
+	//
+	// Immutable.
+	ChangeAddress bool
+
+	// ChangePort indicates whether we should check ports.
+	//
+	// Immutable.
+	ChangePort bool
 }
 
-func dnatAction(pkt PacketBufferPtr, hook Hook, r *Route, port uint16, address tcpip.Address) (RuleVerdict, int) {
-	return natAction(pkt, hook, r, portOrIdentRange{start: port, size: 1}, address, true /* dnat */)
+func dnatAction(pkt *PacketBuffer, hook Hook, r *Route, port uint16, address tcpip.Address, changePort, changeAddress bool) (RuleVerdict, int) {
+	return natAction(pkt, hook, r, portOrIdentRange{start: port, size: 1}, address, true /* dnat */, changePort, changeAddress)
 }
 
 func targetPortRangeForTCPAndUDP(originalSrcPort uint16) portOrIdentRange {
@@ -278,7 +318,7 @@ func targetPortRangeForTCPAndUDP(originalSrcPort uint16) portOrIdentRange {
 	}
 }
 
-func snatAction(pkt PacketBufferPtr, hook Hook, r *Route, port uint16, address tcpip.Address) (RuleVerdict, int) {
+func snatAction(pkt *PacketBuffer, hook Hook, r *Route, port uint16, address tcpip.Address, changePort, changeAddress bool) (RuleVerdict, int) {
 	portsOrIdents := portOrIdentRange{start: port, size: 1}
 
 	switch pkt.TransportProtocolNumber {
@@ -298,17 +338,17 @@ func snatAction(pkt PacketBufferPtr, hook Hook, r *Route, port uint16, address t
 		portsOrIdents = portOrIdentRange{start: 0, size: math.MaxUint16 + 1}
 	}
 
-	return natAction(pkt, hook, r, portsOrIdents, address, false /* dnat */)
+	return natAction(pkt, hook, r, portsOrIdents, address, false /* dnat */, changePort, changeAddress)
 }
 
-func natAction(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIdents portOrIdentRange, address tcpip.Address, dnat bool) (RuleVerdict, int) {
+func natAction(pkt *PacketBuffer, hook Hook, r *Route, portsOrIdents portOrIdentRange, address tcpip.Address, dnat, changePort, changeAddress bool) (RuleVerdict, int) {
 	// Drop the packet if network and transport header are not set.
 	if len(pkt.NetworkHeader().Slice()) == 0 || len(pkt.TransportHeader().Slice()) == 0 {
 		return RuleDrop, 0
 	}
 
 	if t := pkt.tuple; t != nil {
-		t.conn.performNAT(pkt, hook, r, portsOrIdents, address, dnat)
+		t.conn.performNAT(pkt, hook, r, portsOrIdents, address, dnat, changePort, changeAddress)
 		return RuleAccept, 0
 	}
 
@@ -316,7 +356,7 @@ func natAction(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIdents portOrIde
 }
 
 // Action implements Target.Action.
-func (st *SNATTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, _ AddressableEndpoint) (RuleVerdict, int) {
+func (st *SNATTarget) Action(pkt *PacketBuffer, hook Hook, r *Route, _ AddressableEndpoint) (RuleVerdict, int) {
 	// Sanity check.
 	if st.NetworkProtocol != pkt.NetworkProtocolNumber {
 		panic(fmt.Sprintf(
@@ -332,10 +372,12 @@ func (st *SNATTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, _ Address
 		panic(fmt.Sprintf("%s unrecognized", hook))
 	}
 
-	return snatAction(pkt, hook, r, st.Port, st.Addr)
+	return snatAction(pkt, hook, r, st.Port, st.Addr, st.ChangePort, st.ChangeAddress)
 }
 
 // MasqueradeTarget modifies the source port/IP in the outgoing packets.
+//
+// +stateify savable
 type MasqueradeTarget struct {
 	// NetworkProtocol is the network protocol the target is used with. It
 	// is immutable.
@@ -343,7 +385,7 @@ type MasqueradeTarget struct {
 }
 
 // Action implements Target.Action.
-func (mt *MasqueradeTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) {
+func (mt *MasqueradeTarget) Action(pkt *PacketBuffer, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) {
 	// Sanity check.
 	if mt.NetworkProtocol != pkt.NetworkProtocolNumber {
 		panic(fmt.Sprintf(
@@ -360,7 +402,7 @@ func (mt *MasqueradeTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, add
 	}
 
 	// addressEP is expected to be set for the postrouting hook.
-	ep := addressEP.AcquireOutgoingPrimaryAddress(pkt.Network().DestinationAddress(), false /* allowExpired */)
+	ep := addressEP.AcquireOutgoingPrimaryAddress(pkt.Network().DestinationAddress(), tcpip.Address{} /* srcHint */, false /* allowExpired */)
 	if ep == nil {
 		// No address exists that we can use as a source address.
 		return RuleDrop, 0
@@ -368,7 +410,7 @@ func (mt *MasqueradeTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, add
 
 	address := ep.AddressWithPrefix().Address
 	ep.DecRef()
-	return snatAction(pkt, hook, r, 0 /* port */, address)
+	return snatAction(pkt, hook, r, 0 /* port */, address, true /* changePort */, true /* changeAddress */)
 }
 
 func rewritePacket(n header.Network, t header.Transport, updateSRCFields, fullChecksum, updatePseudoHeader bool, newPortOrIdent uint16, newAddr tcpip.Address) {
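
DNATTarget and SNATTarget above gain ChangePort and ChangeAddress fields
that are threaded through dnatAction/snatAction into the connection's
performNAT, so a caller can rewrite only the port or only the address. A
small usage sketch of building a DNAT rule with the new fields; the address
and port values are arbitrary examples, not defaults from gvisor.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

func main() {
	// Rewrite both the destination address and port. Clearing one of the
	// Change* flags would restrict the rewrite to the other field.
	target := &stack.DNATTarget{
		Addr:            tcpip.AddrFrom4([4]byte{10, 0, 0, 2}),
		Port:            8080,
		NetworkProtocol: header.IPv4ProtocolNumber,
		ChangeAddress:   true,
		ChangePort:      true,
	}
	rule := stack.Rule{
		Filter: stack.EmptyFilter4(),
		Target: target,
	}
	fmt.Printf("%+v\n", rule)
}
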
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go
index 3a908f9e..0c7ce686 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go
@@ -84,7 +84,7 @@ type IPTables struct {
 
 	reaper tcpip.Timer
 
-	mu ipTablesRWMutex
+	mu ipTablesRWMutex `state:"nosave"`
 	// v4Tables and v6tables map tableIDs to tables. They hold builtin
 	// tables only, not user tables.
 	//
@@ -103,6 +103,14 @@ type IPTables struct {
 	modified bool
 }
 
+// Modified returns whether iptables has been modified. It is inherently racy
+// and intended for use only in tests.
+func (it *IPTables) Modified() bool {
+	it.mu.Lock()
+	defer it.mu.Unlock()
+	return it.modified
+}
+
 // VisitTargets traverses all the targets of all tables and replaces each with
 // transform(target).
 func (it *IPTables) VisitTargets(transform func(Target) Target) {
@@ -235,11 +243,31 @@ type IPHeaderFilter struct {
 	OutputInterfaceInvert bool
 }
 
+// EmptyFilter4 returns an initialized IPv4 header filter.
+func EmptyFilter4() IPHeaderFilter {
+	return IPHeaderFilter{
+		Dst:     tcpip.AddrFrom4([4]byte{}),
+		DstMask: tcpip.AddrFrom4([4]byte{}),
+		Src:     tcpip.AddrFrom4([4]byte{}),
+		SrcMask: tcpip.AddrFrom4([4]byte{}),
+	}
+}
+
+// EmptyFilter6 returns an initialized IPv6 header filter.
+func EmptyFilter6() IPHeaderFilter {
+	return IPHeaderFilter{
+		Dst:     tcpip.AddrFrom16([16]byte{}),
+		DstMask: tcpip.AddrFrom16([16]byte{}),
+		Src:     tcpip.AddrFrom16([16]byte{}),
+		SrcMask: tcpip.AddrFrom16([16]byte{}),
+	}
+}
+
 // match returns whether pkt matches the filter.
 //
 // Preconditions: pkt.NetworkHeader is set and is at least of the minimal IPv4
 // or IPv6 header length.
-func (fl IPHeaderFilter) match(pkt PacketBufferPtr, hook Hook, inNicName, outNicName string) bool {
+func (fl IPHeaderFilter) match(pkt *PacketBuffer, hook Hook, inNicName, outNicName string) bool {
 	// Extract header fields.
 	var (
 		transProto tcpip.TransportProtocolNumber
@@ -347,7 +375,7 @@ type Matcher interface {
 	// used for suspicious packets.
 	//
 	// Precondition: packet.NetworkHeader is set.
-	Match(hook Hook, packet PacketBufferPtr, inputInterfaceName, outputInterfaceName string) (matches bool, hotdrop bool)
+	Match(hook Hook, packet *PacketBuffer, inputInterfaceName, outputInterfaceName string) (matches bool, hotdrop bool)
 }
 
 // A Target is the interface for taking an action for a packet.
@@ -355,5 +383,5 @@ type Target interface {
 	// Action takes an action on the packet and returns a verdict on how
 	// traversal should (or should not) continue. If the return value is
 	// Jump, it also returns the index of the rule to jump to.
-	Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int)
+	Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int)
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/multi_port_endpoint_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/multi_port_endpoint_mutex.go
index 7e2d5818..1038997b 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/multi_port_endpoint_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/multi_port_endpoint_mutex.go
@@ -17,7 +17,7 @@ type multiPortEndpointRWMutex struct {
 var multiPortEndpointlockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type multiPortEndpointlockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go
index b38bef4e..fb01e305 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go
@@ -31,6 +31,24 @@ type NeighborStats struct {
 	UnreachableEntryLookups *tcpip.StatCounter
 }
 
+// +stateify savable
+type dynamicCacheEntry struct {
+	lru neighborEntryList
+
+	// count tracks the amount of dynamic entries in the cache. This is
+	// needed since static entries do not count towards the LRU cache
+	// eviction strategy.
+	count uint16
+}
+
+// +stateify savable
+type neighborCacheMu struct {
+	neighborCacheRWMutex `state:"nosave"`
+
+	cache   map[tcpip.Address]*neighborEntry
+	dynamic dynamicCacheEntry
+}
+
 // neighborCache maps IP addresses to link addresses. It uses the Least
 // Recently Used (LRU) eviction strategy to implement a bounded cache for
 // dynamically acquired entries. It contains the state machine and configuration
@@ -43,24 +61,13 @@ type NeighborStats struct {
 //  2. Static entries are explicitly added by a user and have no expiration.
 //     Their state is always Static. The amount of static entries stored in the
 //     cache is unbounded.
+//
+// +stateify savable
 type neighborCache struct {
 	nic     *nic
 	state   *NUDState
 	linkRes LinkAddressResolver
-
-	mu struct {
-		neighborCacheRWMutex
-
-		cache   map[tcpip.Address]*neighborEntry
-		dynamic struct {
-			lru neighborEntryList
-
-			// count tracks the amount of dynamic entries in the cache. This is
-			// needed since static entries do not count towards the LRU cache
-			// eviction strategy.
-			count uint16
-		}
-	}
+	mu      neighborCacheMu
 }
 
 // getOrCreateEntry retrieves a cache entry associated with addr. The
@@ -247,7 +254,7 @@ func (n *neighborCache) clear() {
 	}
 
 	n.mu.dynamic.lru = neighborEntryList{}
-	n.mu.cache = make(map[tcpip.Address]*neighborEntry)
+	clear(n.mu.cache)
 	n.mu.dynamic.count = 0
 }
 
@@ -298,7 +305,7 @@ func (n *neighborCache) handleConfirmation(addr tcpip.Address, linkAddr tcpip.Li
 func (n *neighborCache) init(nic *nic, r LinkAddressResolver) {
 	*n = neighborCache{
 		nic:     nic,
-		state:   NewNUDState(nic.stack.nudConfigs, nic.stack.clock, nic.stack.randomGenerator),
+		state:   NewNUDState(nic.stack.nudConfigs, nic.stack.clock, nic.stack.insecureRNG),
 		linkRes: r,
 	}
 	n.mu.Lock()
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache_mutex.go
index 290e48b5..0de0fea6 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache_mutex.go
@@ -17,7 +17,7 @@ type neighborCacheRWMutex struct {
 var neighborCachelockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type neighborCachelockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry_mutex.go
index f8be1dae..c6b08eb8 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry_mutex.go
@@ -17,7 +17,7 @@ type neighborEntryRWMutex struct {
 var neighborEntrylockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type neighborEntrylockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go
index 3e8a0a3e..a4fcc8fb 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go
@@ -23,6 +23,7 @@ import (
 	"gvisor.dev/gvisor/pkg/tcpip/header"
 )
 
+// +stateify savable
 type linkResolver struct {
 	resolver LinkAddressResolver
 
@@ -34,6 +35,8 @@ var _ NetworkDispatcher = (*nic)(nil)
 
 // nic represents a "network interface card" to which the networking stack is
 // attached.
+//
+// +stateify savable
 type nic struct {
 	NetworkLinkEndpoint
 
@@ -47,7 +50,7 @@ type nic struct {
 	// enableDisableMu is used to synchronize attempts to enable/disable the NIC.
 	// Without this mutex, calls to enable/disable the NIC may interleave and
 	// leave the NIC in an inconsistent state.
-	enableDisableMu nicRWMutex
+	enableDisableMu nicRWMutex `state:"nosave"`
 
 	// The network endpoints themselves may be modified by calling the interface's
 	// methods, but the map reference and entries must be constant.
@@ -69,7 +72,7 @@ type nic struct {
 	linkResQueue packetsPendingLinkResolution
 
 	// packetEPsMu protects annotated fields below.
-	packetEPsMu packetEPsRWMutex
+	packetEPsMu packetEPsRWMutex `state:"nosave"`
 
 	// eps is protected by the mutex, but the values contained in it are not.
 	//
@@ -78,7 +81,15 @@ type nic struct {
 
 	qDisc QueueingDiscipline
 
-	gro groDispatcher
+	// deliverLinkPackets specifies whether this NIC delivers packets to
+	// packet sockets. It is immutable.
+	//
+	// deliverLinkPackets is off by default because some users already
+	// deliver link packets by explicitly calling nic.DeliverLinkPackets.
+	deliverLinkPackets bool
+
+	// Primary is the main controlling interface in a bonded setup.
+	Primary *nic
 }
 
 // makeNICStats initializes the NIC statistics and associates them to the global
@@ -90,6 +101,7 @@ func makeNICStats(global tcpip.NICStats) sharedStats {
 	return stats
 }
 
+// +stateify savable
 type packetEndpointList struct {
 	mu packetEndpointListRWMutex
 
@@ -133,6 +145,7 @@ func (p *packetEndpointList) forEach(fn func(PacketEndpoint)) {
 
 var _ QueueingDiscipline = (*delegatingQueueingDiscipline)(nil)
 
+// +stateify savable
 type delegatingQueueingDiscipline struct {
 	LinkWriter
 }
@@ -140,7 +153,7 @@ type delegatingQueueingDiscipline struct {
 func (*delegatingQueueingDiscipline) Close() {}
 
 // WritePacket passes the packet through to the underlying LinkWriter's WritePackets.
-func (qDisc *delegatingQueueingDiscipline) WritePacket(pkt PacketBufferPtr) tcpip.Error {
+func (qDisc *delegatingQueueingDiscipline) WritePacket(pkt *PacketBuffer) tcpip.Error {
 	var pkts PacketBufferList
 	pkts.PushBack(pkt)
 	_, err := qDisc.LinkWriter.WritePackets(pkts)
@@ -174,6 +187,7 @@ func newNIC(stack *Stack, id tcpip.NICID, ep LinkEndpoint, opts NICOptions) *nic
 		linkAddrResolvers:         make(map[tcpip.NetworkProtocolNumber]*linkResolver),
 		duplicateAddressDetectors: make(map[tcpip.NetworkProtocolNumber]DuplicateAddressDetector),
 		qDisc:                     qDisc,
+		deliverLinkPackets:        opts.DeliverLinkPackets,
 	}
 	nic.linkResQueue.init(nic)
 
@@ -202,7 +216,6 @@ func newNIC(stack *Stack, id tcpip.NICID, ep LinkEndpoint, opts NICOptions) *nic
 		}
 	}
 
-	nic.gro.init(opts.GROTimeout)
 	nic.NetworkLinkEndpoint.Attach(nic)
 
 	return nic
@@ -298,7 +311,10 @@ func (n *nic) enable() tcpip.Error {
 // remove detaches NIC from the link endpoint and releases network endpoint
 // resources. This guarantees no packets between this NIC and the network
 // stack.
-func (n *nic) remove() tcpip.Error {
+//
+// It returns an action that has to be executed after releasing the Stack lock
+// and any error encountered.
+func (n *nic) remove(closeLinkEndpoint bool) (func(), tcpip.Error) {
 	n.enableDisableMu.Lock()
 
 	n.disableLocked()
@@ -309,18 +325,24 @@ func (n *nic) remove() tcpip.Error {
 
 	n.enableDisableMu.Unlock()
 
-	// Shutdown GRO.
-	n.gro.close()
-
 	// Drain and drop any packets pending link resolution.
 	// We must not hold n.enableDisableMu here.
 	n.linkResQueue.cancel()
 
+	var deferAct func()
 	// Prevent packets from going down to the link before shutting the link down.
 	n.qDisc.Close()
 	n.NetworkLinkEndpoint.Attach(nil)
+	if closeLinkEndpoint {
+		ep := n.NetworkLinkEndpoint
+		ep.SetOnCloseAction(nil)
+		// The link endpoint has to be closed without holding a
+		// netstack lock, because it can trigger other netstack
+		// operations.
+		deferAct = ep.Close
+	}
 
-	return nil
+	return deferAct, nil
 }
 
 // setPromiscuousMode enables or disables promiscuous mode.
@@ -339,7 +361,7 @@ func (n *nic) IsLoopback() bool {
 }
 
 // WritePacket implements NetworkEndpoint.
-func (n *nic) WritePacket(r *Route, pkt PacketBufferPtr) tcpip.Error {
+func (n *nic) WritePacket(r *Route, pkt *PacketBuffer) tcpip.Error {
 	routeInfo, _, err := r.resolvedFields(nil)
 	switch err.(type) {
 	case nil:
@@ -370,7 +392,7 @@ func (n *nic) WritePacket(r *Route, pkt PacketBufferPtr) tcpip.Error {
 }
 
 // WritePacketToRemote implements NetworkInterface.
-func (n *nic) WritePacketToRemote(remoteLinkAddr tcpip.LinkAddress, pkt PacketBufferPtr) tcpip.Error {
+func (n *nic) WritePacketToRemote(remoteLinkAddr tcpip.LinkAddress, pkt *PacketBuffer) tcpip.Error {
 	pkt.EgressRoute = RouteInfo{
 		routeInfo: routeInfo{
 			NetProto:         pkt.NetworkProtocolNumber,
@@ -381,21 +403,26 @@ func (n *nic) WritePacketToRemote(remoteLinkAddr tcpip.LinkAddress, pkt PacketBu
 	return n.writePacket(pkt)
 }
 
-func (n *nic) writePacket(pkt PacketBufferPtr) tcpip.Error {
+func (n *nic) writePacket(pkt *PacketBuffer) tcpip.Error {
 	n.NetworkLinkEndpoint.AddHeader(pkt)
 	return n.writeRawPacket(pkt)
 }
 
-func (n *nic) writeRawPacketWithLinkHeaderInPayload(pkt PacketBufferPtr) tcpip.Error {
+func (n *nic) writeRawPacketWithLinkHeaderInPayload(pkt *PacketBuffer) tcpip.Error {
 	if !n.NetworkLinkEndpoint.ParseHeader(pkt) {
 		return &tcpip.ErrMalformedHeader{}
 	}
 	return n.writeRawPacket(pkt)
 }
 
-func (n *nic) writeRawPacket(pkt PacketBufferPtr) tcpip.Error {
+func (n *nic) writeRawPacket(pkt *PacketBuffer) tcpip.Error {
 	// Always an outgoing packet.
 	pkt.PktType = tcpip.PacketOutgoing
+
+	if n.deliverLinkPackets {
+		n.DeliverLinkPacket(pkt.NetworkProtocolNumber, pkt)
+	}
+
 	if err := n.qDisc.WritePacket(pkt); err != nil {
 		if _, ok := err.(*tcpip.ErrNoBufferSpace); ok {
 			n.stats.txPacketsDroppedNoBufferSpace.Increment()
@@ -420,7 +447,7 @@ func (n *nic) Spoofing() bool {
 
 // primaryAddress returns an address that can be used to communicate with
 // remoteAddr.
-func (n *nic) primaryEndpoint(protocol tcpip.NetworkProtocolNumber, remoteAddr tcpip.Address) AssignableAddressEndpoint {
+func (n *nic) primaryEndpoint(protocol tcpip.NetworkProtocolNumber, remoteAddr, srcHint tcpip.Address) AssignableAddressEndpoint {
 	ep := n.getNetworkEndpoint(protocol)
 	if ep == nil {
 		return nil
@@ -431,7 +458,7 @@ func (n *nic) primaryEndpoint(protocol tcpip.NetworkProtocolNumber, remoteAddr t
 		return nil
 	}
 
-	return addressableEndpoint.AcquireOutgoingPrimaryAddress(remoteAddr, n.Spoofing())
+	return addressableEndpoint.AcquireOutgoingPrimaryAddress(remoteAddr, srcHint, n.Spoofing())
 }
 
 type getAddressBehaviour int
@@ -498,7 +525,7 @@ func (n *nic) getAddressOrCreateTempInner(protocol tcpip.NetworkProtocolNumber,
 		return nil
 	}
 
-	return addressableEndpoint.AcquireAssignedAddress(address, createTemp, peb)
+	return addressableEndpoint.AcquireAssignedAddress(address, createTemp, peb, false)
 }
 
 // addAddress adds a new address to n, so that it starts accepting packets
@@ -715,7 +742,7 @@ func (n *nic) isInGroup(addr tcpip.Address) bool {
 // DeliverNetworkPacket finds the appropriate network protocol endpoint and
 // hands the packet over for further processing. This function is called when
 // the NIC receives a packet from the link endpoint.
-func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) {
+func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) {
 	enabled := n.Enabled()
 	// If the NIC is not yet enabled, don't receive any packets.
 	if !enabled {
@@ -735,19 +762,23 @@ func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt Pac
 
 	pkt.RXChecksumValidated = n.NetworkLinkEndpoint.Capabilities()&CapabilityRXChecksumOffload != 0
 
-	n.gro.dispatch(pkt, protocol, networkEndpoint)
+	if n.deliverLinkPackets {
+		n.DeliverLinkPacket(protocol, pkt)
+	}
+
+	networkEndpoint.HandlePacket(pkt)
 }
 
-func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) {
+func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) {
 	// Deliver to interested packet endpoints without holding NIC lock.
-	var packetEPPkt PacketBufferPtr
+	var packetEPPkt *PacketBuffer
 	defer func() {
-		if !packetEPPkt.IsNil() {
+		if packetEPPkt != nil {
 			packetEPPkt.DecRef()
 		}
 	}()
 	deliverPacketEPs := func(ep PacketEndpoint) {
-		if packetEPPkt.IsNil() {
+		if packetEPPkt == nil {
 			// Packet endpoints hold the full packet.
 			//
 			// We perform a deep copy because higher-level endpoints may point to
@@ -797,7 +828,7 @@ func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt Packet
 
 // DeliverTransportPacket delivers the packets to the appropriate transport
 // protocol endpoint.
-func (n *nic) DeliverTransportPacket(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr) TransportPacketDisposition {
+func (n *nic) DeliverTransportPacket(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer) TransportPacketDisposition {
 	state, ok := n.stack.transportProtocols[protocol]
 	if !ok {
 		n.stats.unknownL4ProtocolRcvdPacketCounts.Increment(uint64(protocol))
@@ -857,7 +888,7 @@ func (n *nic) DeliverTransportPacket(protocol tcpip.TransportProtocolNumber, pkt
 }
 
 // DeliverTransportError implements TransportDispatcher.
-func (n *nic) DeliverTransportError(local, remote tcpip.Address, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, transErr TransportError, pkt PacketBufferPtr) {
+func (n *nic) DeliverTransportError(local, remote tcpip.Address, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, transErr TransportError, pkt *PacketBuffer) {
 	state, ok := n.stack.transportProtocols[trans]
 	if !ok {
 		return
@@ -885,7 +916,7 @@ func (n *nic) DeliverTransportError(local, remote tcpip.Address, net tcpip.Netwo
 }
 
 // DeliverRawPacket implements TransportDispatcher.
-func (n *nic) DeliverRawPacket(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr) {
+func (n *nic) DeliverRawPacket(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer) {
 	// For ICMPv4 only we validate the header length for compatibility with
 	// raw(7) ICMP_FILTER. The same check is made in Linux here:
 	// https://github.com/torvalds/linux/blob/70585216/net/ipv4/raw.c#L189.
@@ -928,7 +959,7 @@ func (n *nic) setNUDConfigs(protocol tcpip.NetworkProtocolNumber, c NUDConfigura
 	return &tcpip.ErrNotSupported{}
 }
 
-func (n *nic) registerPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) tcpip.Error {
+func (n *nic) registerPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) {
 	n.packetEPsMu.Lock()
 	defer n.packetEPsMu.Unlock()
 
@@ -938,8 +969,6 @@ func (n *nic) registerPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep Pa
 		n.packetEPs[netProto] = eps
 	}
 	eps.add(ep)
-
-	return nil
 }
 
 func (n *nic) unregisterPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) {
@@ -1065,3 +1094,11 @@ func (n *nic) multicastForwarding(protocol tcpip.NetworkProtocolNumber) (bool, t
 
 	return ep.MulticastForwarding(), nil
 }
+
+// CoordinatorNIC represents a NetworkLinkEndpoint that can join multiple network devices.
+type CoordinatorNIC interface {
+	// AddNIC adds the specified NIC device.
+	AddNIC(n *nic) tcpip.Error
+	// DelNIC deletes the specified NIC device.
+	DelNIC(n *nic) tcpip.Error
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_mutex.go
index 95bfb301..e3b2332a 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_mutex.go
@@ -17,7 +17,7 @@ type nicRWMutex struct {
 var niclockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type niclockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_stats.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_stats.go
index aa336545..38081682 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_stats.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_stats.go
@@ -18,6 +18,7 @@ import (
 	"gvisor.dev/gvisor/pkg/tcpip"
 )
 
+// +stateify savable
 type sharedStats struct {
 	local tcpip.NICStats
 	multiCounterNICStats
@@ -25,6 +26,7 @@ type sharedStats struct {
 
 // LINT.IfChange(multiCounterNICPacketStats)
 
+// +stateify savable
 type multiCounterNICPacketStats struct {
 	packets tcpip.MultiCounterStat
 	bytes   tcpip.MultiCounterStat
@@ -39,6 +41,7 @@ func (m *multiCounterNICPacketStats) init(a, b *tcpip.NICPacketStats) {
 
 // LINT.IfChange(multiCounterNICNeighborStats)
 
+// +stateify savable
 type multiCounterNICNeighborStats struct {
 	unreachableEntryLookups                    tcpip.MultiCounterStat
 	droppedConfirmationForNoninitiatedNeighbor tcpip.MultiCounterStat
@@ -55,6 +58,7 @@ func (m *multiCounterNICNeighborStats) init(a, b *tcpip.NICNeighborStats) {
 
 // LINT.IfChange(multiCounterNICStats)
 
+// +stateify savable
 type multiCounterNICStats struct {
 	unknownL3ProtocolRcvdPacketCounts tcpip.MultiIntegralStatCounterMap
 	unknownL4ProtocolRcvdPacketCounts tcpip.MultiIntegralStatCounterMap
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nud.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nud.go
index ae586e52..0c9c6cc8 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nud.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nud.go
@@ -165,6 +165,8 @@ type ReachabilityConfirmationFlags struct {
 // NUDConfigurations is the NUD configurations for the netstack. This is used
 // by the neighbor cache to operate the NUD state machine on each device in the
 // local network.
+//
+// +stateify savable
 type NUDConfigurations struct {
 	// BaseReachableTime is the base duration for computing the random reachable
 	// time.
@@ -314,26 +316,31 @@ func calcMaxRandomFactor(minRandomFactor float32) float32 {
 	return defaultMaxRandomFactor
 }
 
-// NUDState stores states needed for calculating reachable time.
-type NUDState struct {
-	clock tcpip.Clock
-	rng   *rand.Rand
+// +stateify savable
+type nudStateMu struct {
+	sync.RWMutex `state:"nosave"`
 
-	mu struct {
-		sync.RWMutex
+	config NUDConfigurations
 
-		config NUDConfigurations
+	// reachableTime is the duration to wait for a REACHABLE entry to
+	// transition into STALE after inactivity. This value is calculated with
+	// the algorithm defined in RFC 4861 section 6.3.2.
+	reachableTime time.Duration
 
-		// reachableTime is the duration to wait for a REACHABLE entry to
-		// transition into STALE after inactivity. This value is calculated with
-		// the algorithm defined in RFC 4861 section 6.3.2.
-		reachableTime time.Duration
+	expiration            tcpip.MonotonicTime
+	prevBaseReachableTime time.Duration
+	prevMinRandomFactor   float32
+	prevMaxRandomFactor   float32
+}
 
-		expiration            tcpip.MonotonicTime
-		prevBaseReachableTime time.Duration
-		prevMinRandomFactor   float32
-		prevMaxRandomFactor   float32
-	}
+// NUDState stores states needed for calculating reachable time.
+//
+// +stateify savable
+type NUDState struct {
+	clock tcpip.Clock
+	// TODO(b/341946753): Restore when netstack is savable.
+	rng *rand.Rand `state:"nosave"`
+	mu  nudStateMu
 }
 
 // NewNUDState returns new NUDState using c as configuration and the specified
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go
index 86b75695..24956e71 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go
@@ -58,9 +58,6 @@ type PacketBufferOptions struct {
 	OnRelease func()
 }
 
-// PacketBufferPtr is a pointer to a PacketBuffer.
-type PacketBufferPtr = *PacketBuffer
-
 // A PacketBuffer contains all the data of a network packet.
 //
 // As a PacketBuffer traverses up the stack, it may be necessary to pass it to
@@ -94,7 +91,7 @@ type PacketBufferPtr = *PacketBuffer
 //
 // Outgoing Packet: When a header is pushed, `pushed` gets incremented by the
 // pushed length, and the current value is stored for each header. PacketBuffer
-// substracts this value from `reserved` to compute the starting offset of each
+// subtracts this value from `reserved` to compute the starting offset of each
 // header in `buf`.
 //
 // Incoming Packet: When a header is consumed (a.k.a. parsed), the current
@@ -172,7 +169,7 @@ type PacketBuffer struct {
 }
 
 // NewPacketBuffer creates a new PacketBuffer with opts.
-func NewPacketBuffer(opts PacketBufferOptions) PacketBufferPtr {
+func NewPacketBuffer(opts PacketBufferOptions) *PacketBuffer {
 	pk := pkPool.Get().(*PacketBuffer)
 	pk.reset()
 	if opts.ReserveHeaderBytes != 0 {
@@ -190,7 +187,7 @@ func NewPacketBuffer(opts PacketBufferOptions) PacketBufferPtr {
 }
 
 // IncRef increments the PacketBuffer's refcount.
-func (pk PacketBufferPtr) IncRef() PacketBufferPtr {
+func (pk *PacketBuffer) IncRef() *PacketBuffer {
 	pk.packetBufferRefs.IncRef()
 	return pk
 }
@@ -198,7 +195,7 @@ func (pk PacketBufferPtr) IncRef() PacketBufferPtr {
 // DecRef decrements the PacketBuffer's refcount. If the refcount is
 // decremented to zero, the PacketBuffer is returned to the PacketBuffer
 // pool.
-func (pk PacketBufferPtr) DecRef() {
+func (pk *PacketBuffer) DecRef() {
 	pk.packetBufferRefs.DecRef(func() {
 		if pk.onRelease != nil {
 			pk.onRelease()
@@ -209,24 +206,24 @@ func (pk PacketBufferPtr) DecRef() {
 	})
 }
 
-func (pk PacketBufferPtr) reset() {
+func (pk *PacketBuffer) reset() {
 	*pk = PacketBuffer{}
 }
 
 // ReservedHeaderBytes returns the number of bytes initially reserved for
 // headers.
-func (pk PacketBufferPtr) ReservedHeaderBytes() int {
+func (pk *PacketBuffer) ReservedHeaderBytes() int {
 	return pk.reserved
 }
 
 // AvailableHeaderBytes returns the number of bytes currently available for
 // headers. This is relevant to PacketHeader.Push method only.
-func (pk PacketBufferPtr) AvailableHeaderBytes() int {
+func (pk *PacketBuffer) AvailableHeaderBytes() int {
 	return pk.reserved - pk.pushed
 }
 
 // VirtioNetHeader returns the handle to virtio-layer header.
-func (pk PacketBufferPtr) VirtioNetHeader() PacketHeader {
+func (pk *PacketBuffer) VirtioNetHeader() PacketHeader {
 	return PacketHeader{
 		pk:  pk,
 		typ: virtioNetHeader,
@@ -234,7 +231,7 @@ func (pk PacketBufferPtr) VirtioNetHeader() PacketHeader {
 }
 
 // LinkHeader returns the handle to link-layer header.
-func (pk PacketBufferPtr) LinkHeader() PacketHeader {
+func (pk *PacketBuffer) LinkHeader() PacketHeader {
 	return PacketHeader{
 		pk:  pk,
 		typ: linkHeader,
@@ -242,7 +239,7 @@ func (pk PacketBufferPtr) LinkHeader() PacketHeader {
 }
 
 // NetworkHeader returns the handle to network-layer header.
-func (pk PacketBufferPtr) NetworkHeader() PacketHeader {
+func (pk *PacketBuffer) NetworkHeader() PacketHeader {
 	return PacketHeader{
 		pk:  pk,
 		typ: networkHeader,
@@ -250,7 +247,7 @@ func (pk PacketBufferPtr) NetworkHeader() PacketHeader {
 }
 
 // TransportHeader returns the handle to transport-layer header.
-func (pk PacketBufferPtr) TransportHeader() PacketHeader {
+func (pk *PacketBuffer) TransportHeader() PacketHeader {
 	return PacketHeader{
 		pk:  pk,
 		typ: transportHeader,
@@ -258,29 +255,33 @@ func (pk PacketBufferPtr) TransportHeader() PacketHeader {
 }
 
 // HeaderSize returns the total size of all headers in bytes.
-func (pk PacketBufferPtr) HeaderSize() int {
+func (pk *PacketBuffer) HeaderSize() int {
 	return pk.pushed + pk.consumed
 }
 
 // Size returns the size of packet in bytes.
-func (pk PacketBufferPtr) Size() int {
+func (pk *PacketBuffer) Size() int {
 	return int(pk.buf.Size()) - pk.headerOffset()
 }
 
 // MemSize returns the estimation size of the pk in memory, including backing
 // buffer data.
-func (pk PacketBufferPtr) MemSize() int {
+func (pk *PacketBuffer) MemSize() int {
 	return int(pk.buf.Size()) + PacketBufferStructSize
 }
 
 // Data returns the handle to data portion of pk.
-func (pk PacketBufferPtr) Data() PacketData {
+func (pk *PacketBuffer) Data() PacketData {
 	return PacketData{pk: pk}
 }
 
 // AsSlices returns the underlying storage of the whole packet.
-func (pk PacketBufferPtr) AsSlices() [][]byte {
-	var views [][]byte
+//
+// Note that AsSlices can allocate a lot. In hot paths it may be preferable to
+// iterate over a PacketBuffer's data via AsViewList.
+func (pk *PacketBuffer) AsSlices() [][]byte {
+	vl := pk.buf.AsViewList()
+	views := make([][]byte, 0, vl.Len())
 	offset := pk.headerOffset()
 	pk.buf.SubApply(offset, int(pk.buf.Size())-offset, func(v *buffer.View) {
 		views = append(views, v.AsSlice())
@@ -288,9 +289,15 @@ func (pk PacketBufferPtr) AsSlices() [][]byte {
 	return views
 }
 
+// AsViewList returns the list of Views backing the PacketBuffer along with the
+// header offset into them. Users may not save or modify the ViewList returned.
+func (pk *PacketBuffer) AsViewList() (buffer.ViewList, int) {
+	return pk.buf.AsViewList(), pk.headerOffset()
+}
+
 // ToBuffer returns a caller-owned copy of the underlying storage of the whole
 // packet.
-func (pk PacketBufferPtr) ToBuffer() buffer.Buffer {
+func (pk *PacketBuffer) ToBuffer() buffer.Buffer {
 	b := pk.buf.Clone()
 	b.TrimFront(int64(pk.headerOffset()))
 	return b
@@ -298,7 +305,7 @@ func (pk PacketBufferPtr) ToBuffer() buffer.Buffer {
 
 // ToView returns a caller-owned copy of the underlying storage of the whole
 // packet as a view.
-func (pk PacketBufferPtr) ToView() *buffer.View {
+func (pk *PacketBuffer) ToView() *buffer.View {
 	p := buffer.NewView(int(pk.buf.Size()))
 	offset := pk.headerOffset()
 	pk.buf.SubApply(offset, int(pk.buf.Size())-offset, func(v *buffer.View) {
@@ -307,19 +314,19 @@ func (pk PacketBufferPtr) ToView() *buffer.View {
 	return p
 }
 
-func (pk PacketBufferPtr) headerOffset() int {
+func (pk *PacketBuffer) headerOffset() int {
 	return pk.reserved - pk.pushed
 }
 
-func (pk PacketBufferPtr) headerOffsetOf(typ headerType) int {
+func (pk *PacketBuffer) headerOffsetOf(typ headerType) int {
 	return pk.reserved + pk.headers[typ].offset
 }
 
-func (pk PacketBufferPtr) dataOffset() int {
+func (pk *PacketBuffer) dataOffset() int {
 	return pk.reserved + pk.consumed
 }
 
-func (pk PacketBufferPtr) push(typ headerType, size int) []byte {
+func (pk *PacketBuffer) push(typ headerType, size int) []byte {
 	h := &pk.headers[typ]
 	if h.length > 0 {
 		panic(fmt.Sprintf("push(%s, %d) called after previous push", typ, size))
@@ -334,7 +341,7 @@ func (pk PacketBufferPtr) push(typ headerType, size int) []byte {
 	return view.AsSlice()
 }
 
-func (pk PacketBufferPtr) consume(typ headerType, size int) (v []byte, consumed bool) {
+func (pk *PacketBuffer) consume(typ headerType, size int) (v []byte, consumed bool) {
 	h := &pk.headers[typ]
 	if h.length > 0 {
 		panic(fmt.Sprintf("consume must not be called twice: type %s", typ))
@@ -349,7 +356,7 @@ func (pk PacketBufferPtr) consume(typ headerType, size int) (v []byte, consumed
 	return view.AsSlice(), true
 }
 
-func (pk PacketBufferPtr) headerView(typ headerType) buffer.View {
+func (pk *PacketBuffer) headerView(typ headerType) buffer.View {
 	h := &pk.headers[typ]
 	if h.length == 0 {
 		return buffer.View{}
@@ -363,7 +370,7 @@ func (pk PacketBufferPtr) headerView(typ headerType) buffer.View {
 
 // Clone makes a semi-deep copy of pk. The underlying packet payload is
 // shared. Hence, no modifications is done to underlying packet payload.
-func (pk PacketBufferPtr) Clone() PacketBufferPtr {
+func (pk *PacketBuffer) Clone() *PacketBuffer {
 	newPk := pkPool.Get().(*PacketBuffer)
 	newPk.reset()
 	newPk.buf = pk.buf.Clone()
@@ -389,7 +396,7 @@ func (pk PacketBufferPtr) Clone() PacketBufferPtr {
 
 // ReserveHeaderBytes prepends reserved space for headers at the front
 // of the underlying buf. Can only be called once per packet.
-func (pk PacketBufferPtr) ReserveHeaderBytes(reserved int) {
+func (pk *PacketBuffer) ReserveHeaderBytes(reserved int) {
 	if pk.reserved != 0 {
 		panic(fmt.Sprintf("ReserveHeaderBytes(...) called on packet with reserved=%d, want reserved=0", pk.reserved))
 	}
@@ -400,7 +407,7 @@ func (pk PacketBufferPtr) ReserveHeaderBytes(reserved int) {
 // Network returns the network header as a header.Network.
 //
 // Network should only be called when NetworkHeader has been set.
-func (pk PacketBufferPtr) Network() header.Network {
+func (pk *PacketBuffer) Network() header.Network {
 	switch netProto := pk.NetworkProtocolNumber; netProto {
 	case header.IPv4ProtocolNumber:
 		return header.IPv4(pk.NetworkHeader().Slice())
@@ -416,7 +423,7 @@ func (pk PacketBufferPtr) Network() header.Network {
 //
 // See PacketBuffer.Data for details about how a packet buffer holds an inbound
 // packet.
-func (pk PacketBufferPtr) CloneToInbound() PacketBufferPtr {
+func (pk *PacketBuffer) CloneToInbound() *PacketBuffer {
 	newPk := pkPool.Get().(*PacketBuffer)
 	newPk.reset()
 	newPk.buf = pk.buf.Clone()
@@ -432,7 +439,7 @@ func (pk PacketBufferPtr) CloneToInbound() PacketBufferPtr {
 //
 // The returned packet buffer will have the network and transport headers
 // set if the original packet buffer did.
-func (pk PacketBufferPtr) DeepCopyForForwarding(reservedHeaderBytes int) PacketBufferPtr {
+func (pk *PacketBuffer) DeepCopyForForwarding(reservedHeaderBytes int) *PacketBuffer {
 	payload := BufferSince(pk.NetworkHeader())
 	defer payload.Release()
 	newPk := NewPacketBuffer(PacketBufferOptions{
@@ -462,11 +469,6 @@ func (pk PacketBufferPtr) DeepCopyForForwarding(reservedHeaderBytes int) PacketB
 	return newPk
 }
 
-// IsNil returns whether the pointer is logically nil.
-func (pk PacketBufferPtr) IsNil() bool {
-	return pk == nil
-}
-
 // headerInfo stores metadata about a header in a packet.
 //
 // +stateify savable
@@ -481,7 +483,7 @@ type headerInfo struct {
 
 // PacketHeader is a handle object to a header in the underlying packet.
 type PacketHeader struct {
-	pk  PacketBufferPtr
+	pk  *PacketBuffer
 	typ headerType
 }
 
@@ -523,7 +525,7 @@ func (h PacketHeader) Consume(size int) (v []byte, consumed bool) {
 //
 // +stateify savable
 type PacketData struct {
-	pk PacketBufferPtr
+	pk *PacketBuffer
 }
 
 // PullUp returns a contiguous slice of size bytes from the beginning of d.
@@ -601,7 +603,7 @@ func (d PacketData) MergeBuffer(b *buffer.Buffer) {
 
 // MergeFragment appends the data portion of frag to dst. It modifies
 // frag and frag should not be used again.
-func MergeFragment(dst, frag PacketBufferPtr) {
+func MergeFragment(dst, frag *PacketBuffer) {
 	frag.buf.TrimFront(int64(frag.dataOffset()))
 	dst.buf.Merge(&frag.buf)
 }
@@ -674,7 +676,7 @@ func (d PacketData) ChecksumAtOffset(offset int) uint16 {
 
 // Range represents a contiguous subportion of a PacketBuffer.
 type Range struct {
-	pk     PacketBufferPtr
+	pk     *PacketBuffer
 	offset int
 	length int
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_list.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_list.go
index 226b3e49..363059a9 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_list.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_list.go
@@ -62,6 +62,18 @@ func (pl *PacketBufferList) PushBack(pb *PacketBuffer) {
 	pl.pbs = append(pl.pbs, pb)
 }
 
+// PopFront removes the first element in the list if it exists and returns it.
+//
+//go:nosplit
+func (pl *PacketBufferList) PopFront() *PacketBuffer {
+	if len(pl.pbs) == 0 {
+		return nil
+	}
+	pkt := pl.pbs[0]
+	pl.pbs = pl.pbs[1:]
+	return pkt
+}
+
 // DecRef decreases the reference count on each PacketBuffer
 // stored in the list.
 //
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go
index 8b226b73..a3a85693 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go
@@ -1,6 +1,7 @@
 package stack
 
 import (
+	"context"
 	"fmt"
 
 	"gvisor.dev/gvisor/pkg/atomicbitops"
@@ -134,7 +135,7 @@ func (r *packetBufferRefs) DecRef(destroy func()) {
 	}
 }
 
-func (r *packetBufferRefs) afterLoad() {
+func (r *packetBufferRefs) afterLoad(context.Context) {
 	if r.ReadRefs() > 0 {
 		refs.Register(r)
 	}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go
index ddfb8004..9d1105b2 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go
@@ -21,8 +21,8 @@ const PacketBufferStructSize = int(unsafe.Sizeof(PacketBuffer{}))
 
 // ID returns a unique ID for the underlying storage of the packet.
 //
-// Two PacketBufferPtrs have the same IDs if and only if they point to the same
+// Two *PacketBuffers have the same IDs if and only if they point to the same
 // location in memory.
-func (pk PacketBufferPtr) ID() uintptr {
+func (pk *PacketBuffer) ID() uintptr {
 	return uintptr(unsafe.Pointer(pk))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_endpoint_list_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_endpoint_list_mutex.go
index c7e6ef64..ad3e0b28 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_endpoint_list_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_endpoint_list_mutex.go
@@ -17,7 +17,7 @@ type packetEndpointListRWMutex struct {
 var packetEndpointListlockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type packetEndpointListlockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_eps_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_eps_mutex.go
index 2c7d2d9d..4e9dda8b 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_eps_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_eps_mutex.go
@@ -17,7 +17,7 @@ type packetEPsRWMutex struct {
 var packetEPslockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type packetEPslockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packets_pending_link_resolution_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packets_pending_link_resolution_mutex.go
index c5660882..ac47a79e 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packets_pending_link_resolution_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packets_pending_link_resolution_mutex.go
@@ -19,7 +19,7 @@ var packetsPendingLinkResolutionprefixIndex *locking.MutexClass
 var packetsPendingLinkResolutionlockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type packetsPendingLinkResolutionlockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/pending_packets.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/pending_packets.go
index 0627fb81..b95c3cf0 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/pending_packets.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/pending_packets.go
@@ -27,34 +27,39 @@ const (
 	maxPendingPacketsPerResolution = 256
 )
 
+// +stateify savable
 type pendingPacket struct {
 	routeInfo RouteInfo
-	pkt       PacketBufferPtr
+	pkt       *PacketBuffer
+}
+
+// +stateify savable
+type packetsPendingLinkResolutionMu struct {
+	packetsPendingLinkResolutionMutex `state:"nosave"`
+
+	// The packets to send once the resolver completes.
+	//
+	// The link resolution channel is used as the key for this map.
+	packets map[<-chan struct{}][]pendingPacket
+
+	// FIFO of channels used to cancel the oldest goroutine waiting for
+	// link-address resolution.
+	//
+	// cancelChans holds the same channels that are used as keys to packets.
+	cancelChans []<-chan struct{}
 }
 
 // packetsPendingLinkResolution is a queue of packets pending link resolution.
 //
 // Once link resolution completes successfully, the packets will be written.
+//
+// +stateify savable
 type packetsPendingLinkResolution struct {
 	nic *nic
-
-	mu struct {
-		packetsPendingLinkResolutionMutex
-
-		// The packets to send once the resolver completes.
-		//
-		// The link resolution channel is used as the key for this map.
-		packets map[<-chan struct{}][]pendingPacket
-
-		// FIFO of channels used to cancel the oldest goroutine waiting for
-		// link-address resolution.
-		//
-		// cancelChans holds the same channels that are used as keys to packets.
-		cancelChans []<-chan struct{}
-	}
+	mu  packetsPendingLinkResolutionMu
 }
 
-func (f *packetsPendingLinkResolution) incrementOutgoingPacketErrors(pkt PacketBufferPtr) {
+func (f *packetsPendingLinkResolution) incrementOutgoingPacketErrors(pkt *PacketBuffer) {
 	f.nic.stack.stats.IP.OutgoingPacketErrors.Increment()
 
 	if ipEndpointStats, ok := f.nic.getNetworkEndpoint(pkt.NetworkProtocolNumber).Stats().(IPNetworkEndpointStats); ok {
@@ -113,7 +118,7 @@ func (f *packetsPendingLinkResolution) dequeue(ch <-chan struct{}, linkAddr tcpi
 // If the maximum number of pending resolutions is reached, the packets
 // associated with the oldest link resolution will be dequeued as if they failed
 // link resolution.
-func (f *packetsPendingLinkResolution) enqueue(r *Route, pkt PacketBufferPtr) tcpip.Error {
+func (f *packetsPendingLinkResolution) enqueue(r *Route, pkt *PacketBuffer) tcpip.Error {
 	f.mu.Lock()
 	// Make sure we attempt resolution while holding f's lock so that we avoid
 	// a race where link resolution completes before we enqueue the packets.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go
index bbfe1a79..24f0391b 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go
@@ -113,19 +113,16 @@ type TransportError interface {
 // TransportEndpoint is the interface that needs to be implemented by transport
 // protocol (e.g., tcp, udp) endpoints that can handle packets.
 type TransportEndpoint interface {
-	// UniqueID returns an unique ID for this transport endpoint.
-	UniqueID() uint64
-
 	// HandlePacket is called by the stack when new packets arrive to this
 	// transport endpoint. It sets the packet buffer's transport header.
 	//
 	// HandlePacket may modify the packet.
-	HandlePacket(TransportEndpointID, PacketBufferPtr)
+	HandlePacket(TransportEndpointID, *PacketBuffer)
 
 	// HandleError is called when the transport endpoint receives an error.
 	//
 	// HandleError takes may modify the packet buffer.
-	HandleError(TransportError, PacketBufferPtr)
+	HandleError(TransportError, *PacketBuffer)
 
 	// Abort initiates an expedited endpoint teardown. It puts the endpoint
 	// in a closed state and frees all resources associated with it. This
@@ -153,7 +150,7 @@ type RawTransportEndpoint interface {
 	// layer up.
 	//
 	// HandlePacket may modify the packet.
-	HandlePacket(PacketBufferPtr)
+	HandlePacket(*PacketBuffer)
 }
 
 // PacketEndpoint is the interface that needs to be implemented by packet
@@ -171,7 +168,7 @@ type PacketEndpoint interface {
 	// should construct its own ethernet header for applications.
 	//
 	// HandlePacket may modify pkt.
-	HandlePacket(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, pkt PacketBufferPtr)
+	HandlePacket(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, pkt *PacketBuffer)
 }
 
 // UnknownDestinationPacketDisposition enumerates the possible return values from
@@ -221,7 +218,7 @@ type TransportProtocol interface {
 	//
 	// HandleUnknownDestinationPacket may modify the packet if it handles
 	// the issue.
-	HandleUnknownDestinationPacket(TransportEndpointID, PacketBufferPtr) UnknownDestinationPacketDisposition
+	HandleUnknownDestinationPacket(TransportEndpointID, *PacketBuffer) UnknownDestinationPacketDisposition
 
 	// SetOption allows enabling/disabling protocol specific features.
 	// SetOption returns an error if the option is not supported or the
@@ -250,7 +247,7 @@ type TransportProtocol interface {
 	// Parse sets pkt.TransportHeader and trims pkt.Data appropriately. It does
 	// neither and returns false if pkt.Data is too small, i.e. pkt.Data.Size() <
 	// MinimumPacketSize()
-	Parse(pkt PacketBufferPtr) (ok bool)
+	Parse(pkt *PacketBuffer) (ok bool)
 }
 
 // TransportPacketDisposition is the result from attempting to deliver a packet
@@ -282,18 +279,18 @@ type TransportDispatcher interface {
 	// pkt.NetworkHeader must be set before calling DeliverTransportPacket.
 	//
 	// DeliverTransportPacket may modify the packet.
-	DeliverTransportPacket(tcpip.TransportProtocolNumber, PacketBufferPtr) TransportPacketDisposition
+	DeliverTransportPacket(tcpip.TransportProtocolNumber, *PacketBuffer) TransportPacketDisposition
 
 	// DeliverTransportError delivers an error to the appropriate transport
 	// endpoint.
 	//
 	// DeliverTransportError may modify the packet buffer.
-	DeliverTransportError(local, remote tcpip.Address, _ tcpip.NetworkProtocolNumber, _ tcpip.TransportProtocolNumber, _ TransportError, _ PacketBufferPtr)
+	DeliverTransportError(local, remote tcpip.Address, _ tcpip.NetworkProtocolNumber, _ tcpip.TransportProtocolNumber, _ TransportError, _ *PacketBuffer)
 
 	// DeliverRawPacket delivers a packet to any subscribed raw sockets.
 	//
 	// DeliverRawPacket does NOT take ownership of the packet buffer.
-	DeliverRawPacket(tcpip.TransportProtocolNumber, PacketBufferPtr)
+	DeliverRawPacket(tcpip.TransportProtocolNumber, *PacketBuffer)
 }
 
 // PacketLooping specifies where an outbound packet should be sent.
@@ -319,6 +316,9 @@ type NetworkHeaderParams struct {
 
 	// TOS refers to TypeOfService or TrafficClass field of the IP-header.
 	TOS uint8
+
+	// DF indicates whether the DF bit should be set.
+	DF bool
 }
 
 // GroupAddressableEndpoint is an endpoint that supports group addressing.
@@ -384,6 +384,8 @@ const (
 
 // AddressLifetimes encodes an address' preferred and valid lifetimes, as well
 // as if the address is deprecated.
+//
+// +stateify savable
 type AddressLifetimes struct {
 	// Deprecated is whether the address is deprecated.
 	Deprecated bool
@@ -668,10 +670,11 @@ type AddressableEndpoint interface {
 	// that is considered bound to the endpoint, optionally creating a temporary
 	// endpoint if requested and no existing address exists.
 	//
-	// The returned endpoint's reference count is incremented.
+	// The returned endpoint's reference count is incremented if readOnly is
+	// false.
 	//
 	// Returns nil if the specified address is not local to this endpoint.
-	AcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB PrimaryEndpointBehavior) AddressEndpoint
+	AcquireAssignedAddress(localAddr tcpip.Address, allowTemp bool, tempPEB PrimaryEndpointBehavior, readOnly bool) AddressEndpoint
 
 	// AcquireOutgoingPrimaryAddress returns a primary address that may be used as
 	// a source address when sending packets to the passed remote address.
@@ -681,7 +684,7 @@ type AddressableEndpoint interface {
 	// The returned endpoint's reference count is incremented.
 	//
 	// Returns nil if a primary address is not available.
-	AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) AddressEndpoint
+	AcquireOutgoingPrimaryAddress(remoteAddr, srcHint tcpip.Address, allowExpired bool) AddressEndpoint
 
 	// PrimaryAddresses returns the primary addresses.
 	PrimaryAddresses() []tcpip.AddressWithPrefix
@@ -740,13 +743,13 @@ type NetworkInterface interface {
 	CheckLocalAddress(tcpip.NetworkProtocolNumber, tcpip.Address) bool
 
 	// WritePacketToRemote writes the packet to the given remote link address.
-	WritePacketToRemote(tcpip.LinkAddress, PacketBufferPtr) tcpip.Error
+	WritePacketToRemote(tcpip.LinkAddress, *PacketBuffer) tcpip.Error
 
 	// WritePacket writes a packet through the given route.
 	//
 	// WritePacket may modify the packet buffer. The packet buffer's
 	// network and transport header must be set.
-	WritePacket(*Route, PacketBufferPtr) tcpip.Error
+	WritePacket(*Route, *PacketBuffer) tcpip.Error
 
 	// HandleNeighborProbe processes an incoming neighbor probe (e.g. ARP
 	// request or NDP Neighbor Solicitation).
@@ -764,7 +767,7 @@ type NetworkInterface interface {
 type LinkResolvableNetworkEndpoint interface {
 	// HandleLinkResolutionFailure is called when link resolution prevents the
 	// argument from having been sent.
-	HandleLinkResolutionFailure(PacketBufferPtr)
+	HandleLinkResolutionFailure(*PacketBuffer)
 }
 
 // NetworkEndpoint is the interface that needs to be implemented by endpoints
@@ -802,17 +805,17 @@ type NetworkEndpoint interface {
 	// WritePacket writes a packet to the given destination address and
 	// protocol. It may modify pkt. pkt.TransportHeader must have
 	// already been set.
-	WritePacket(r *Route, params NetworkHeaderParams, pkt PacketBufferPtr) tcpip.Error
+	WritePacket(r *Route, params NetworkHeaderParams, pkt *PacketBuffer) tcpip.Error
 
 	// WriteHeaderIncludedPacket writes a packet that includes a network
 	// header to the given destination address. It may modify pkt.
-	WriteHeaderIncludedPacket(r *Route, pkt PacketBufferPtr) tcpip.Error
+	WriteHeaderIncludedPacket(r *Route, pkt *PacketBuffer) tcpip.Error
 
 	// HandlePacket is called by the link layer when new packets arrive to
 	// this network endpoint. It sets pkt.NetworkHeader.
 	//
 	// HandlePacket may modify pkt.
-	HandlePacket(pkt PacketBufferPtr)
+	HandlePacket(pkt *PacketBuffer)
 
 	// Close is called when the endpoint is removed from a stack.
 	Close()
@@ -911,11 +914,13 @@ type NetworkProtocol interface {
 	//	- Whether there is an encapsulated transport protocol payload (e.g. ARP
 	//		does not encapsulate anything).
 	//	- Whether pkt.Data was large enough to parse and set pkt.NetworkHeader.
-	Parse(pkt PacketBufferPtr) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool)
+	Parse(pkt *PacketBuffer) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool)
 }
 
 // UnicastSourceAndMulticastDestination is a tuple that represents a unicast
 // source address and a multicast destination address.
+//
+// +stateify savable
 type UnicastSourceAndMulticastDestination struct {
 	// Source represents a unicast source address.
 	Source tcpip.Address
@@ -929,7 +934,7 @@ type MulticastRouteOutgoingInterface struct {
 	// ID corresponds to the outgoing NIC.
 	ID tcpip.NICID
 
-	// MinTTL represents the minumum TTL/HopLimit a multicast packet must have to
+	// MinTTL represents the minimum TTL/HopLimit a multicast packet must have to
 	// be sent through the outgoing interface.
 	//
 	// Note: a value of 0 allows all packets to be forwarded.
@@ -1027,14 +1032,14 @@ type NetworkDispatcher interface {
 	// If the link-layer has a header, the packet's link header must be populated.
 	//
 	// DeliverNetworkPacket may modify pkt.
-	DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr)
+	DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer)
 
 	// DeliverLinkPacket delivers a packet to any interested packet endpoints.
 	//
 	// This method should be called with both incoming and outgoing packets.
 	//
 	// If the link-layer has a header, the packet's link header must be populated.
-	DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr)
+	DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer)
 }
 
 // LinkEndpointCapabilities is the type associated with the capabilities
@@ -1082,6 +1087,9 @@ type NetworkLinkEndpoint interface {
 	// includes the maximum size of an IP packet.
 	MTU() uint32
 
+	// SetMTU updates the maximum transmission unit for the endpoint.
+	SetMTU(mtu uint32)
+
 	// MaxHeaderLength returns the maximum size the data link (and
 	// lower level layers combined) headers can have. Higher levels use this
 	// information to reserve space in the front of the packets they're
@@ -1092,6 +1100,9 @@ type NetworkLinkEndpoint interface {
 	// endpoint.
 	LinkAddress() tcpip.LinkAddress
 
+	// SetLinkAddress updates the endpoint's link address (typically a MAC).
+	SetLinkAddress(addr tcpip.LinkAddress)
+
 	// Capabilities returns the set of capabilities supported by the
 	// endpoint.
 	Capabilities() LinkEndpointCapabilities
@@ -1123,10 +1134,19 @@ type NetworkLinkEndpoint interface {
 	ARPHardwareType() header.ARPHardwareType
 
 	// AddHeader adds a link layer header to the packet if required.
-	AddHeader(PacketBufferPtr)
+	AddHeader(*PacketBuffer)
 
 	// ParseHeader parses the link layer header to the packet.
-	ParseHeader(PacketBufferPtr) bool
+	ParseHeader(*PacketBuffer) bool
+
+	// Close is called when the endpoint is removed from a stack.
+	Close()
+
+	// SetOnCloseAction sets the action that will be executed before closing the
+	// endpoint. It is used to destroy a network device when its endpoint
+	// is closed. Endpoints that are closed only after destroying their
+	// network devices can implement this method as a no-op.
+	SetOnCloseAction(func())
 }
 
 // QueueingDiscipline provides a queueing strategy for outgoing packets (e.g
@@ -1140,7 +1160,7 @@ type QueueingDiscipline interface {
 	// To participate in transparent bridging, a LinkEndpoint implementation
 	// should call eth.Encode with header.EthernetFields.SrcAddr set to
 	// pkg.EgressRoute.LocalLinkAddress if it is provided.
-	WritePacket(PacketBufferPtr) tcpip.Error
+	WritePacket(*PacketBuffer) tcpip.Error
 
 	Close()
 }
@@ -1161,7 +1181,7 @@ type InjectableLinkEndpoint interface {
 	LinkEndpoint
 
 	// InjectInbound injects an inbound packet.
-	InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr)
+	InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer)
 
 	// InjectOutbound writes a fully formed outbound packet directly to the
 	// link.
@@ -1240,6 +1260,8 @@ const (
 )
 
 // DADConfigurations holds configurations for duplicate address detection.
+//
+// +stateify savable
 type DADConfigurations struct {
 	// The number of Neighbor Solicitation messages to send when doing
 	// Duplicate Address Detection for a tentative address.
@@ -1370,9 +1392,9 @@ const (
 	// non-networking data layer.
 	HostGSOSupported
 
-	// GvisorGSOSupported indicates that segmentation offloading may be performed
+	// GVisorGSOSupported indicates that segmentation offloading may be performed
 	// in gVisor.
-	GvisorGSOSupported
+	GVisorGSOSupported
 )
 
 // GSOEndpoint provides access to GSO properties.
@@ -1384,6 +1406,6 @@ type GSOEndpoint interface {
 	SupportedGSO() SupportedGSO
 }
 
-// GvisorGSOMaxSize is a maximum allowed size of a software GSO segment.
+// GVisorGSOMaxSize is a maximum allowed size of a software GSO segment.
 // This isn't a hard limit, because it is never set into packet headers.
-const GvisorGSOMaxSize = 1 << 16
+const GVisorGSOMaxSize = 1 << 16
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go
index 32ce9a7f..e571e8a1 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go
@@ -54,6 +54,11 @@ type Route struct {
 	// neighbor cache.
 	// +checklocks:mu
 	neighborEntry *neighborEntry
+
+	// mtu is the maximum transmission unit to use for this route.
+	// If mtu is 0, this field is ignored and the MTU of the outgoing NIC
+	// is used for egress packets.
+	mtu uint32
 }
 
 // +stateify savable
@@ -101,6 +106,11 @@ func (r *Route) Loop() PacketLooping {
 	return r.routeInfo.Loop
 }
 
+// OutgoingNIC returns the route's outgoing NIC.
+func (r *Route) OutgoingNIC() tcpip.NICID {
+	return r.outgoingNIC.id
+}
+
 // RouteInfo contains all of Route's exported fields.
 //
 // +stateify savable
@@ -135,7 +145,7 @@ func (r *Route) fieldsLocked() RouteInfo {
 // ownership of the provided local address.
 //
 // Returns an empty route if validation fails.
-func constructAndValidateRoute(netProto tcpip.NetworkProtocolNumber, addressEndpoint AssignableAddressEndpoint, localAddressNIC, outgoingNIC *nic, gateway, localAddr, remoteAddr tcpip.Address, handleLocal, multicastLoop bool) *Route {
+func constructAndValidateRoute(netProto tcpip.NetworkProtocolNumber, addressEndpoint AssignableAddressEndpoint, localAddressNIC, outgoingNIC *nic, gateway, localAddr, remoteAddr tcpip.Address, handleLocal, multicastLoop bool, mtu uint32) *Route {
 	if localAddr.BitLen() == 0 {
 		localAddr = addressEndpoint.AddressWithPrefix().Address
 	}
@@ -160,6 +170,7 @@ func constructAndValidateRoute(netProto tcpip.NetworkProtocolNumber, addressEndp
 		addressEndpoint,
 		handleLocal,
 		multicastLoop,
+		mtu,
 	)
 
 	return r
@@ -167,7 +178,7 @@ func constructAndValidateRoute(netProto tcpip.NetworkProtocolNumber, addressEndp
 
 // makeRoute initializes a new route. It takes ownership of the provided
 // AssignableAddressEndpoint.
-func makeRoute(netProto tcpip.NetworkProtocolNumber, gateway, localAddr, remoteAddr tcpip.Address, outgoingNIC, localAddressNIC *nic, localAddressEndpoint AssignableAddressEndpoint, handleLocal, multicastLoop bool) *Route {
+func makeRoute(netProto tcpip.NetworkProtocolNumber, gateway, localAddr, remoteAddr tcpip.Address, outgoingNIC, localAddressNIC *nic, localAddressEndpoint AssignableAddressEndpoint, handleLocal, multicastLoop bool, mtu uint32) *Route {
 	if localAddressNIC.stack != outgoingNIC.stack {
 		panic(fmt.Sprintf("cannot create a route with NICs from different stacks"))
 	}
@@ -178,9 +189,9 @@ func makeRoute(netProto tcpip.NetworkProtocolNumber, gateway, localAddr, remoteA
 
 	loop := PacketOut
 
-	// TODO(gvisor.dev/issue/4689): Loopback interface loops back packets at the
-	// link endpoint level. We can remove this check once loopback interfaces
-	// loop back packets at the network layer.
+	// Loopback interface loops back packets at the link endpoint level. We
+	// could remove this check if loopback interfaces looped back packets
+	// at the network layer.
 	if !outgoingNIC.IsLoopback() {
 		if handleLocal && localAddr != (tcpip.Address{}) && remoteAddr == localAddr {
 			loop = PacketLoop
@@ -193,7 +204,7 @@ func makeRoute(netProto tcpip.NetworkProtocolNumber, gateway, localAddr, remoteA
 		}
 	}
 
-	r := makeRouteInner(netProto, localAddr, remoteAddr, outgoingNIC, localAddressNIC, localAddressEndpoint, loop)
+	r := makeRouteInner(netProto, localAddr, remoteAddr, outgoingNIC, localAddressNIC, localAddressEndpoint, loop, mtu)
 	if r.Loop()&PacketOut == 0 {
 		// Packet will not leave the stack, no need for a gateway or a remote link
 		// address.
@@ -233,7 +244,7 @@ func makeRoute(netProto tcpip.NetworkProtocolNumber, gateway, localAddr, remoteA
 	return r
 }
 
-func makeRouteInner(netProto tcpip.NetworkProtocolNumber, localAddr, remoteAddr tcpip.Address, outgoingNIC, localAddressNIC *nic, localAddressEndpoint AssignableAddressEndpoint, loop PacketLooping) *Route {
+func makeRouteInner(netProto tcpip.NetworkProtocolNumber, localAddr, remoteAddr tcpip.Address, outgoingNIC, localAddressNIC *nic, localAddressEndpoint AssignableAddressEndpoint, loop PacketLooping, mtu uint32) *Route {
 	r := &Route{
 		routeInfo: routeInfo{
 			NetProto:         netProto,
@@ -244,6 +255,7 @@ func makeRouteInner(netProto tcpip.NetworkProtocolNumber, localAddr, remoteAddr
 		},
 		localAddressNIC: localAddressNIC,
 		outgoingNIC:     outgoingNIC,
+		mtu:             mtu,
 	}
 
 	r.mu.Lock()
@@ -259,13 +271,13 @@ func makeRouteInner(netProto tcpip.NetworkProtocolNumber, localAddr, remoteAddr
 // A local route is a route to a destination that is local to the stack.
 func makeLocalRoute(netProto tcpip.NetworkProtocolNumber, localAddr, remoteAddr tcpip.Address, outgoingNIC, localAddressNIC *nic, localAddressEndpoint AssignableAddressEndpoint) *Route {
 	loop := PacketLoop
-	// TODO(gvisor.dev/issue/4689): Loopback interface loops back packets at the
-	// link endpoint level. We can remove this check once loopback interfaces
-	// loop back packets at the network layer.
+	// Loopback interface loops back packets at the link endpoint level. We
+	// could remove this check if loopback interfaces looped back packets
+	// at the network layer.
 	if outgoingNIC.IsLoopback() {
 		loop = PacketOut
 	}
-	return makeRouteInner(netProto, localAddr, remoteAddr, outgoingNIC, localAddressNIC, localAddressEndpoint, loop)
+	return makeRouteInner(netProto, localAddr, remoteAddr, outgoingNIC, localAddressNIC, localAddressEndpoint, loop, 0 /* mtu */)
 }
 
 // RemoteLinkAddress returns the link-layer (MAC) address of the next hop in
@@ -306,10 +318,10 @@ func (r *Route) RequiresTXTransportChecksum() bool {
 	return r.outgoingNIC.NetworkLinkEndpoint.Capabilities()&CapabilityTXChecksumOffload == 0
 }
 
-// HasGvisorGSOCapability returns true if the route supports gVisor GSO.
-func (r *Route) HasGvisorGSOCapability() bool {
+// HasGVisorGSOCapability returns true if the route supports gVisor GSO.
+func (r *Route) HasGVisorGSOCapability() bool {
 	if gso, ok := r.outgoingNIC.NetworkLinkEndpoint.(GSOEndpoint); ok {
-		return gso.SupportedGSO() == GvisorGSOSupported
+		return gso.SupportedGSO() == GVisorGSOSupported
 	}
 	return false
 }
@@ -487,7 +499,7 @@ func (r *Route) isValidForOutgoingRLocked() bool {
 }
 
 // WritePacket writes the packet through the given route.
-func (r *Route) WritePacket(params NetworkHeaderParams, pkt PacketBufferPtr) tcpip.Error {
+func (r *Route) WritePacket(params NetworkHeaderParams, pkt *PacketBuffer) tcpip.Error {
 	if !r.isValidForOutgoing() {
 		return &tcpip.ErrInvalidEndpointState{}
 	}
@@ -497,7 +509,7 @@ func (r *Route) WritePacket(params NetworkHeaderParams, pkt PacketBufferPtr) tcp
 
 // WriteHeaderIncludedPacket writes a packet already containing a network
 // header through the given route.
-func (r *Route) WriteHeaderIncludedPacket(pkt PacketBufferPtr) tcpip.Error {
+func (r *Route) WriteHeaderIncludedPacket(pkt *PacketBuffer) tcpip.Error {
 	if !r.isValidForOutgoing() {
 		return &tcpip.ErrInvalidEndpointState{}
 	}
@@ -510,8 +522,11 @@ func (r *Route) DefaultTTL() uint8 {
 	return r.outgoingNIC.getNetworkEndpoint(r.NetProto()).DefaultTTL()
 }
 
-// MTU returns the MTU of the underlying network endpoint.
+// MTU returns the MTU of the route if present, otherwise the MTU of the underlying network endpoint.
 func (r *Route) MTU() uint32 {
+	if r.mtu > 0 {
+		return r.mtu
+	}
 	return r.outgoingNIC.getNetworkEndpoint(r.NetProto()).MTU()
 }
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_mutex.go
index 0a5bdd4e..28a5e869 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_mutex.go
@@ -17,7 +17,7 @@ type routeRWMutex struct {
 var routelockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type routelockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_stack_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_stack_mutex.go
index 1c7c9285..ec3796c3 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_stack_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_stack_mutex.go
@@ -17,7 +17,7 @@ type routeStackRWMutex struct {
 var routeStacklockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type routeStacklockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go
index e3a7f788..7dc7cd35 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go
@@ -43,25 +43,25 @@ const (
 	DefaultTOS = 0
 )
 
+// +stateify savable
 type transportProtocolState struct {
 	proto          TransportProtocol
-	defaultHandler func(id TransportEndpointID, pkt PacketBufferPtr) bool
+	defaultHandler func(id TransportEndpointID, pkt *PacketBuffer) bool `state:"nosave"`
 }
 
-// ResumableEndpoint is an endpoint that needs to be resumed after restore.
-type ResumableEndpoint interface {
-	// Resume resumes an endpoint after restore. This can be used to restart
-	// background workers such as protocol goroutines. This must be called after
-	// all indirect dependencies of the endpoint has been restored, which
+// RestoredEndpoint is an endpoint that needs to be restored.
+type RestoredEndpoint interface {
+	// Restore restores an endpoint. This can be used to restart background
+	// workers such as protocol goroutines. This must be called after all
+	// indirect dependencies of the endpoint have been restored, which
 	// generally implies at the end of the restore process.
-	Resume(*Stack)
+	Restore(*Stack)
 }
 
-// uniqueIDGenerator is a default unique ID generator.
-type uniqueIDGenerator atomicbitops.Uint64
-
-func (u *uniqueIDGenerator) UniqueID() uint64 {
-	return ((*atomicbitops.Uint64)(u)).Add(1)
+// ResumableEndpoint is an endpoint that needs to be resumed after save.
+type ResumableEndpoint interface {
+	// Resume resumes an endpoint.
+	Resume()
 }
 
 var netRawMissingLogger = log.BasicRateLimitedLogger(time.Minute)
@@ -70,6 +70,8 @@ var netRawMissingLogger = log.BasicRateLimitedLogger(time.Minute)
 // table.
 //
 // LOCK ORDERING: mu > routeMu.
+//
+// +stateify savable
 type Stack struct {
 	transportProtocols map[tcpip.TransportProtocolNumber]*transportProtocolState
 	networkProtocols   map[tcpip.NetworkProtocolNumber]NetworkProtocol
@@ -84,18 +86,23 @@ type Stack struct {
 	stats tcpip.Stats
 
 	// routeMu protects annotated fields below.
-	routeMu routeStackRWMutex
+	routeMu routeStackRWMutex `state:"nosave"`
 
+	// routeTable is a list of routes sorted by prefix length, longest (most specific) first.
 	// +checklocks:routeMu
-	routeTable []tcpip.Route
+	routeTable tcpip.RouteList
 
-	mu stackRWMutex
+	mu stackRWMutex `state:"nosave"`
+	// +checklocks:mu
+	nics map[tcpip.NICID]*nic
 	// +checklocks:mu
-	nics                     map[tcpip.NICID]*nic
 	defaultForwardingEnabled map[tcpip.NetworkProtocolNumber]struct{}
 
+	// nicIDGen is used to generate NIC IDs.
+	nicIDGen atomicbitops.Int32
+
 	// cleanupEndpointsMu protects cleanupEndpoints.
-	cleanupEndpointsMu cleanupEndpointsMutex
+	cleanupEndpointsMu cleanupEndpointsMutex `state:"nosave"`
 	// +checklocks:cleanupEndpointsMu
 	cleanupEndpoints map[TransportEndpoint]struct{}
 
@@ -103,7 +110,8 @@ type Stack struct {
 
 	// If not nil, then any new endpoints will have this probe function
 	// invoked everytime they receive a TCP segment.
-	tcpProbeFunc atomic.Value // TCPProbeFunc
+	// TODO(b/341946753): Restore them when netstack is savable.
+	tcpProbeFunc atomic.Value `state:"nosave"` // TCPProbeFunc
 
 	// clock is used to generate user-visible times.
 	clock tcpip.Clock
@@ -113,10 +121,14 @@ type Stack struct {
 
 	// tables are the iptables packet filtering and manipulation rules.
 	// TODO(gvisor.dev/issue/4595): S/R this field.
-	tables *IPTables
+	tables *IPTables `state:"nosave"`
 
-	// resumableEndpoints is a list of endpoints that need to be resumed if the
+	// restoredEndpoints is a list of endpoints that need to be restored if the
 	// stack is being restored.
+	restoredEndpoints []RestoredEndpoint
+
+	// resumableEndpoints is a list of endpoints that need to be resumed
+	// after save.
 	resumableEndpoints []ResumableEndpoint
 
 	// icmpRateLimiter is a global rate limiter for all ICMP messages generated
@@ -135,15 +147,15 @@ type Stack struct {
 	// integrator NUD related events.
 	nudDisp NUDDispatcher
 
-	// uniqueIDGenerator is a generator of unique identifiers.
-	uniqueIDGenerator UniqueID
-
 	// randomGenerator is an injectable pseudo random generator that can be
-	// used when a random number is required.
-	randomGenerator *rand.Rand
+	// used when a random number is required. It must not be used in
+	// security-sensitive contexts.
+	// TODO(b/341946753): Restore them when netstack is savable.
+	insecureRNG *rand.Rand `state:"nosave"`
 
 	// secureRNG is a cryptographically secure random number generator.
-	secureRNG io.Reader
+	// TODO(b/341946753): Restore them when netstack is savable.
+	secureRNG cryptorand.RNG `state:"nosave"`
 
 	// sendBufferSize holds the min/default/max send buffer sizes for
 	// endpoints other than TCP.
@@ -170,11 +182,6 @@ type Stack struct {
 	tsOffsetSecret uint32
 }
 
-// UniqueID is an abstract generator of unique identifiers.
-type UniqueID interface {
-	UniqueID() uint64
-}
-
 // NetworkProtocolFactory instantiates a network protocol.
 //
 // NetworkProtocolFactory must not attempt to modify the stack, it may only
@@ -208,9 +215,6 @@ type Options struct {
 	// stack (false).
 	HandleLocal bool
 
-	// UniqueID is an optional generator of unique identifiers.
-	UniqueID UniqueID
-
 	// NUDConfigs is the default NUD configurations used by interfaces.
 	NUDConfigs NUDConfigurations
 
@@ -277,7 +281,7 @@ type TransportEndpointInfo struct {
 // incompatible with the receiver.
 //
 // Preconditon: the parent endpoint mu must be held while calling this method.
-func (t *TransportEndpointInfo) AddrNetProtoLocked(addr tcpip.FullAddress, v6only bool) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) {
+func (t *TransportEndpointInfo) AddrNetProtoLocked(addr tcpip.FullAddress, v6only bool, bind bool) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) {
 	netProto := t.NetProto
 	switch addr.Addr.BitLen() {
 	case header.IPv4AddressSizeBits:
@@ -303,6 +307,22 @@ func (t *TransportEndpointInfo) AddrNetProtoLocked(addr tcpip.FullAddress, v6onl
 		}
 	}
 
+	if !bind && addr.Addr.Unspecified() {
+		// If the destination address isn't set, Linux sets it to the
+		// source address. If a source address isn't set either, it
+		// sets both to the loopback address.
+		if t.ID.LocalAddress.Unspecified() {
+			switch netProto {
+			case header.IPv4ProtocolNumber:
+				addr.Addr = header.IPv4Loopback
+			case header.IPv6ProtocolNumber:
+				addr.Addr = header.IPv6Loopback
+			}
+		} else {
+			addr.Addr = t.ID.LocalAddress
+		}
+	}
+
 	switch {
 	case netProto == t.NetProto:
 	case netProto == header.IPv4ProtocolNumber && t.NetProto == header.IPv6ProtocolNumber:
@@ -336,13 +356,10 @@ func New(opts Options) *Stack {
 		clock = tcpip.NewStdClock()
 	}
 
-	if opts.UniqueID == nil {
-		opts.UniqueID = new(uniqueIDGenerator)
-	}
-
 	if opts.SecureRNG == nil {
 		opts.SecureRNG = cryptorand.Reader
 	}
+	secureRNG := cryptorand.RNGFrom(opts.SecureRNG)
 
 	randSrc := opts.RandSource
 	if randSrc == nil {
@@ -354,13 +371,13 @@ func New(opts Options) *Stack {
 		// we wrap it in a simple thread-safe version.
 		randSrc = &lockedRandomSource{src: rand.NewSource(v)}
 	}
-	randomGenerator := rand.New(randSrc)
+	insecureRNG := rand.New(randSrc)
 
 	if opts.IPTables == nil {
 		if opts.DefaultIPTables == nil {
 			opts.DefaultIPTables = DefaultTables
 		}
-		opts.IPTables = opts.DefaultIPTables(clock, randomGenerator)
+		opts.IPTables = opts.DefaultIPTables(clock, insecureRNG)
 	}
 
 	opts.NUDConfigs.resetInvalidFields()
@@ -378,12 +395,11 @@ func New(opts Options) *Stack {
 		handleLocal:                  opts.HandleLocal,
 		tables:                       opts.IPTables,
 		icmpRateLimiter:              NewICMPRateLimiter(clock),
-		seed:                         randomGenerator.Uint32(),
+		seed:                         secureRNG.Uint32(),
 		nudConfigs:                   opts.NUDConfigs,
-		uniqueIDGenerator:            opts.UniqueID,
 		nudDisp:                      opts.NUDDisp,
-		randomGenerator:              randomGenerator,
-		secureRNG:                    opts.SecureRNG,
+		insecureRNG:                  insecureRNG,
+		secureRNG:                    secureRNG,
 		sendBufferSize: tcpip.SendBufferSizeOption{
 			Min:     MinBufferSize,
 			Default: DefaultBufferSize,
@@ -395,7 +411,7 @@ func New(opts Options) *Stack {
 			Max:     DefaultMaxBufferSize,
 		},
 		tcpInvalidRateLimit: defaultTCPInvalidRateLimit,
-		tsOffsetSecret:      randomGenerator.Uint32(),
+		tsOffsetSecret:      secureRNG.Uint32(),
 	}
 
 	// Add specified network protocols.
@@ -421,9 +437,13 @@ func New(opts Options) *Stack {
 	return s
 }
 
-// UniqueID returns a unique identifier.
-func (s *Stack) UniqueID() uint64 {
-	return s.uniqueIDGenerator.UniqueID()
+// NextNICID allocates the next available NIC ID and returns it.
+func (s *Stack) NextNICID() tcpip.NICID {
+	next := s.nicIDGen.Add(1)
+	if next < 0 {
+		panic("NICID overflow")
+	}
+	return tcpip.NICID(next)
 }
 
 // SetNetworkProtocolOption allows configuring individual protocol level
@@ -483,12 +503,22 @@ func (s *Stack) TransportProtocolOption(transport tcpip.TransportProtocolNumber,
 	return transProtoState.proto.Option(option)
 }
 
+// SendBufSizeProto is a protocol that can return its send buffer size.
+type SendBufSizeProto interface {
+	SendBufferSize() tcpip.TCPSendBufferSizeRangeOption
+}
+
+// TCPSendBufferLimits returns the TCP send buffer size limits.
+func (s *Stack) TCPSendBufferLimits() tcpip.TCPSendBufferSizeRangeOption {
+	return s.transportProtocols[header.TCPProtocolNumber].proto.(SendBufSizeProto).SendBufferSize()
+}
+
 // SetTransportProtocolHandler sets the per-stack default handler for the given
 // protocol.
 //
 // It must be called only during initialization of the stack. Changing it as the
 // stack is operating is not supported.
-func (s *Stack) SetTransportProtocolHandler(p tcpip.TransportProtocolNumber, h func(TransportEndpointID, PacketBufferPtr) bool) {
+func (s *Stack) SetTransportProtocolHandler(p tcpip.TransportProtocolNumber, h func(TransportEndpointID, *PacketBuffer) bool) {
 	state := s.transportProtocols[p]
 	if state != nil {
 		state.defaultHandler = h
@@ -667,10 +697,6 @@ func (s *Stack) DisableMulticastForwardingForProtocol(protocol tcpip.NetworkProt
 // the specified NIC for the passed protocol.
 //
 // Returns the previous configuration on the NIC.
-//
-// TODO(https://gvisor.dev/issue/7338): Implement support for multicast
-// forwarding. Currently, setting this value is a no-op and is not ready for
-// use.
 func (s *Stack) SetNICMulticastForwarding(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber, enable bool) (bool, tcpip.Error) {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
@@ -709,33 +735,6 @@ func (s *Stack) SetPortRange(start uint16, end uint16) tcpip.Error {
 	return s.PortManager.SetPortRange(start, end)
 }
 
-// GROTimeout returns the GRO timeout.
-func (s *Stack) GROTimeout(nicID tcpip.NICID) (time.Duration, tcpip.Error) {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-
-	nic, ok := s.nics[nicID]
-	if !ok {
-		return 0, &tcpip.ErrUnknownNICID{}
-	}
-
-	return nic.gro.getInterval(), nil
-}
-
-// SetGROTimeout sets the GRO timeout.
-func (s *Stack) SetGROTimeout(nicID tcpip.NICID, timeout time.Duration) tcpip.Error {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-
-	nic, ok := s.nics[nicID]
-	if !ok {
-		return &tcpip.ErrUnknownNICID{}
-	}
-
-	nic.gro.setInterval(timeout)
-	return nil
-}
-
 // SetRouteTable assigns the route table to be used by this stack. It
 // specifies which NIC to use for given destination address ranges.
 //
@@ -743,21 +742,41 @@ func (s *Stack) SetGROTimeout(nicID tcpip.NICID, timeout time.Duration) tcpip.Er
 func (s *Stack) SetRouteTable(table []tcpip.Route) {
 	s.routeMu.Lock()
 	defer s.routeMu.Unlock()
-	s.routeTable = table
+	s.routeTable.Reset()
+	for _, r := range table {
+		s.addRouteLocked(&r)
+	}
 }
 
 // GetRouteTable returns the route table which is currently in use.
 func (s *Stack) GetRouteTable() []tcpip.Route {
 	s.routeMu.RLock()
 	defer s.routeMu.RUnlock()
-	return append([]tcpip.Route(nil), s.routeTable...)
+	table := make([]tcpip.Route, 0)
+	for r := s.routeTable.Front(); r != nil; r = r.Next() {
+		table = append(table, *r)
+	}
+	return table
 }
 
 // AddRoute appends a route to the route table.
 func (s *Stack) AddRoute(route tcpip.Route) {
 	s.routeMu.Lock()
 	defer s.routeMu.Unlock()
-	s.routeTable = append(s.routeTable, route)
+	s.addRouteLocked(&route)
+}
+
+// +checklocks:s.routeMu
+func (s *Stack) addRouteLocked(route *tcpip.Route) {
+	routePrefix := route.Destination.Prefix()
+	n := s.routeTable.Front()
+	for ; n != nil; n = n.Next() {
+		if n.Destination.Prefix() < routePrefix {
+			s.routeTable.InsertBefore(n, route)
+			return
+		}
+	}
+	s.routeTable.PushBack(route)
 }
 
 // RemoveRoutes removes matching routes from the route table.
@@ -765,13 +784,32 @@ func (s *Stack) RemoveRoutes(match func(tcpip.Route) bool) {
 	s.routeMu.Lock()
 	defer s.routeMu.Unlock()
 
-	var filteredRoutes []tcpip.Route
-	for _, route := range s.routeTable {
-		if !match(route) {
-			filteredRoutes = append(filteredRoutes, route)
+	s.removeRoutesLocked(match)
+}
+
+// +checklocks:s.routeMu
+func (s *Stack) removeRoutesLocked(match func(tcpip.Route) bool) {
+	for route := s.routeTable.Front(); route != nil; {
+		next := route.Next()
+		if match(*route) {
+			s.routeTable.Remove(route)
 		}
+		route = next
 	}
-	s.routeTable = filteredRoutes
+}
+
+// ReplaceRoute replaces the route in the routing table which matches
+// the lookup key for the routing table. If there is no match, the given
+// route will still be added to the routing table.
+// The lookup key consists of destination, ToS, scope and output interface.
+func (s *Stack) ReplaceRoute(route tcpip.Route) {
+	s.routeMu.Lock()
+	defer s.routeMu.Unlock()
+
+	s.removeRoutesLocked(func(rt tcpip.Route) bool {
+		return rt.Equal(route)
+	})
+	s.addRouteLocked(&route)
 }
 
 // NewEndpoint creates a new transport layer endpoint of the given protocol.
@@ -837,8 +875,21 @@ type NICOptions struct {
 	// QDisc is the queue discipline to use for this NIC.
 	QDisc QueueingDiscipline
 
-	// GROTimeout specifies the GRO timeout. Zero bypasses GRO.
-	GROTimeout time.Duration
+	// DeliverLinkPackets specifies whether the NIC is responsible for
+	// delivering raw packets to packet sockets.
+	DeliverLinkPackets bool
+}
+
+// GetNICByID returns the network device associated with the specified ID.
+func (s *Stack) GetNICByID(id tcpip.NICID) (*nic, tcpip.Error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	n, ok := s.nics[id]
+	if !ok {
+		return nil, &tcpip.ErrNoSuchFile{}
+	}
+	return n, nil
 }
 
 // CreateNICWithOptions creates a NIC with the provided id, LinkEndpoint, and
@@ -850,6 +901,9 @@ func (s *Stack) CreateNICWithOptions(id tcpip.NICID, ep LinkEndpoint, opts NICOp
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
+	if id == 0 {
+		return &tcpip.ErrInvalidNICID{}
+	}
 	// Make sure id is unique.
 	if _, ok := s.nics[id]; ok {
 		return &tcpip.ErrDuplicateNICID{}
@@ -871,6 +925,9 @@ func (s *Stack) CreateNICWithOptions(id tcpip.NICID, ep LinkEndpoint, opts NICOp
 		}
 	}
 	s.nics[id] = n
+	ep.SetOnCloseAction(func() {
+		s.RemoveNIC(id)
+	})
 	if !opts.Disabled {
 		return n.enable()
 	}
@@ -944,36 +1001,107 @@ func (s *Stack) CheckNIC(id tcpip.NICID) bool {
 // RemoveNIC removes NIC and all related routes from the network stack.
 func (s *Stack) RemoveNIC(id tcpip.NICID) tcpip.Error {
 	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	return s.removeNICLocked(id)
+	deferAct, err := s.removeNICLocked(id)
+	s.mu.Unlock()
+	if deferAct != nil {
+		deferAct()
+	}
+	return err
 }
 
 // removeNICLocked removes NIC and all related routes from the network stack.
 //
 // +checklocks:s.mu
-func (s *Stack) removeNICLocked(id tcpip.NICID) tcpip.Error {
+func (s *Stack) removeNICLocked(id tcpip.NICID) (func(), tcpip.Error) {
 	nic, ok := s.nics[id]
 	if !ok {
-		return &tcpip.ErrUnknownNICID{}
+		return nil, &tcpip.ErrUnknownNICID{}
 	}
 	delete(s.nics, id)
 
+	if nic.Primary != nil {
+		b := nic.Primary.NetworkLinkEndpoint.(CoordinatorNIC)
+		if err := b.DelNIC(nic); err != nil {
+			return nil, err
+		}
+	}
+
 	// Remove routes in-place. n tracks the number of routes written.
 	s.routeMu.Lock()
-	n := 0
-	for i, r := range s.routeTable {
-		s.routeTable[i] = tcpip.Route{}
-		if r.NIC != id {
-			// Keep this route.
-			s.routeTable[n] = r
-			n++
+	for r := s.routeTable.Front(); r != nil; {
+		next := r.Next()
+		if r.NIC == id {
+			s.routeTable.Remove(r)
 		}
+		r = next
 	}
-	s.routeTable = s.routeTable[:n]
 	s.routeMu.Unlock()
 
-	return nic.remove()
+	return nic.remove(true /* closeLinkEndpoint */)
+}
+
+// SetNICCoordinator sets a coordinator device.
+func (s *Stack) SetNICCoordinator(id tcpip.NICID, mid tcpip.NICID) tcpip.Error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	nic, ok := s.nics[id]
+	if !ok {
+		return &tcpip.ErrUnknownNICID{}
+	}
+
+	m, ok := s.nics[mid]
+	if !ok {
+		return &tcpip.ErrUnknownNICID{}
+	}
+	b, ok := m.NetworkLinkEndpoint.(CoordinatorNIC)
+	if !ok {
+		return &tcpip.ErrNotSupported{}
+	}
+	if err := b.AddNIC(nic); err != nil {
+		return err
+	}
+	nic.Primary = m
+	return nil
+}
+
+// SetNICAddress sets the hardware address of the NIC identified by the given ID.
+func (s *Stack) SetNICAddress(id tcpip.NICID, addr tcpip.LinkAddress) tcpip.Error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	nic, ok := s.nics[id]
+	if !ok {
+		return &tcpip.ErrUnknownNICID{}
+	}
+	nic.NetworkLinkEndpoint.SetLinkAddress(addr)
+	return nil
+}
+
+// SetNICName sets a NIC's name.
+func (s *Stack) SetNICName(id tcpip.NICID, name string) tcpip.Error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	nic, ok := s.nics[id]
+	if !ok {
+		return &tcpip.ErrUnknownNICID{}
+	}
+	nic.name = name
+	return nil
+}
+
+// SetNICMTU sets a NIC's MTU.
+func (s *Stack) SetNICMTU(id tcpip.NICID, mtu uint32) tcpip.Error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	nic, ok := s.nics[id]
+	if !ok {
+		return &tcpip.ErrUnknownNICID{}
+	}
+	nic.NetworkLinkEndpoint.SetMTU(mtu)
+	return nil
 }
 
 // NICInfo captures the name and addresses assigned to a NIC.
@@ -1166,9 +1294,9 @@ func (s *Stack) GetMainNICAddress(id tcpip.NICID, protocol tcpip.NetworkProtocol
 	return nic.PrimaryAddress(protocol)
 }
 
-func (s *Stack) getAddressEP(nic *nic, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) AssignableAddressEndpoint {
+func (s *Stack) getAddressEP(nic *nic, localAddr, remoteAddr, srcHint tcpip.Address, netProto tcpip.NetworkProtocolNumber) AssignableAddressEndpoint {
 	if localAddr.BitLen() == 0 {
-		return nic.primaryEndpoint(netProto, remoteAddr)
+		return nic.primaryEndpoint(netProto, remoteAddr, srcHint)
 	}
 	return nic.findEndpoint(netProto, localAddr, CanBePrimaryEndpoint)
 }
@@ -1186,8 +1314,8 @@ func (s *Stack) NewRouteForMulticast(nicID tcpip.NICID, remoteAddr tcpip.Address
 		return nil
 	}
 
-	if addressEndpoint := s.getAddressEP(nic, tcpip.Address{} /* localAddr */, remoteAddr, netProto); addressEndpoint != nil {
-		return constructAndValidateRoute(netProto, addressEndpoint, nic, nic, tcpip.Address{} /* gateway */, tcpip.Address{} /* localAddr */, remoteAddr, s.handleLocal, false /* multicastLoop */)
+	if addressEndpoint := s.getAddressEP(nic, tcpip.Address{} /* localAddr */, remoteAddr, tcpip.Address{} /* srcHint */, netProto); addressEndpoint != nil {
+		return constructAndValidateRoute(netProto, addressEndpoint, nic, nic, tcpip.Address{} /* gateway */, tcpip.Address{} /* localAddr */, remoteAddr, s.handleLocal, false /* multicastLoop */, 0 /* mtu */)
 	}
 	return nil
 }
@@ -1298,14 +1426,14 @@ func isNICForwarding(nic *nic, proto tcpip.NetworkProtocolNumber) bool {
 // endpoint.
 //
 // +checklocksread:s.mu
-func (s *Stack) findRouteWithLocalAddrFromAnyInterfaceRLocked(outgoingNIC *nic, localAddr, remoteAddr, gateway tcpip.Address, netProto tcpip.NetworkProtocolNumber, multicastLoop bool) *Route {
+func (s *Stack) findRouteWithLocalAddrFromAnyInterfaceRLocked(outgoingNIC *nic, localAddr, remoteAddr, srcHint, gateway tcpip.Address, netProto tcpip.NetworkProtocolNumber, multicastLoop bool, mtu uint32) *Route {
 	for _, aNIC := range s.nics {
-		addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, netProto)
+		addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, srcHint, netProto)
 		if addressEndpoint == nil {
 			continue
 		}
 
-		if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, outgoingNIC, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil {
+		if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, outgoingNIC, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop, mtu); r != nil {
 			return r
 		}
 	}
@@ -1321,7 +1449,7 @@ func (s *Stack) findRouteWithLocalAddrFromAnyInterfaceRLocked(outgoingNIC *nic,
 // leave through any interface unless the route is link-local.
 //
 // If no local address is provided, the stack will select a local address. If no
-// remote address is provided, the stack wil use a remote address equal to the
+// remote address is provided, the stack will use a remote address equal to the
 // local address.
 func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber, multicastLoop bool) (*Route, tcpip.Error) {
 	s.mu.RLock()
@@ -1348,17 +1476,18 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n
 	// through the interface if the interface is valid and enabled.
 	if id != 0 && !needRoute {
 		if nic, ok := s.nics[id]; ok && nic.Enabled() {
-			if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, netProto); addressEndpoint != nil {
+			if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, tcpip.Address{} /* srcHint */, netProto); addressEndpoint != nil {
 				return makeRoute(
 					netProto,
 					tcpip.Address{}, /* gateway */
 					localAddr,
 					remoteAddr,
-					nic, /* outboundNIC */
+					nic, /* outgoingNIC */
 					nic, /* localAddressNIC*/
 					addressEndpoint,
 					s.handleLocal,
 					multicastLoop,
+					0, /* mtu */
 				), nil
 			}
 		}
@@ -1377,7 +1506,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n
 		s.routeMu.RLock()
 		defer s.routeMu.RUnlock()
 
-		for _, route := range s.routeTable {
+		for route := s.routeTable.Front(); route != nil; route = route.Next() {
 			if remoteAddr.BitLen() != 0 && !route.Destination.Contains(remoteAddr) {
 				continue
 			}
@@ -1388,12 +1517,12 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n
 			}
 
 			if id == 0 || id == route.NIC {
-				if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, netProto); addressEndpoint != nil {
+				if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, route.SourceHint, netProto); addressEndpoint != nil {
 					var gateway tcpip.Address
 					if needRoute {
 						gateway = route.Gateway
 					}
-					r := constructAndValidateRoute(netProto, addressEndpoint, nic /* outgoingNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop)
+					r := constructAndValidateRoute(netProto, addressEndpoint, nic /* localAddressNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop, route.MTU)
 					if r == nil {
 						panic(fmt.Sprintf("non-forwarding route validation failed with route table entry = %#v, id = %d, localAddr = %s, remoteAddr = %s", route, id, localAddr, remoteAddr))
 					}
@@ -1416,10 +1545,11 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n
 			locallyGenerated := (id != 0 || localAddr != tcpip.Address{})
 			if onlyGlobalAddresses && chosenRoute.Equal(tcpip.Route{}) && isNICForwarding(nic, netProto) {
 				if locallyGenerated {
-					chosenRoute = route
+					chosenRoute = *route
 					continue
 				}
-				if r := s.findRouteWithLocalAddrFromAnyInterfaceRLocked(nic, localAddr, remoteAddr, route.Gateway, netProto, multicastLoop); r != nil {
+
+				if r := s.findRouteWithLocalAddrFromAnyInterfaceRLocked(nic, localAddr, remoteAddr, route.SourceHint, route.Gateway, netProto, multicastLoop, route.MTU); r != nil {
 					return r
 				}
 			}
@@ -1447,8 +1577,8 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n
 		// Use the specified NIC to get the local address endpoint.
 		if id != 0 {
 			if aNIC, ok := s.nics[id]; ok {
-				if addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, netProto); addressEndpoint != nil {
-					if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil {
+				if addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, chosenRoute.SourceHint, netProto); addressEndpoint != nil {
+					if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop, chosenRoute.MTU); r != nil {
 						return r, nil
 					}
 				}
@@ -1461,7 +1591,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n
 		if id == 0 {
 			// If an interface is not specified, try to find a NIC that holds the local
 			// address endpoint to construct a route.
-			if r := s.findRouteWithLocalAddrFromAnyInterfaceRLocked(nic, localAddr, remoteAddr, gateway, netProto, multicastLoop); r != nil {
+			if r := s.findRouteWithLocalAddrFromAnyInterfaceRLocked(nic, localAddr, remoteAddr, chosenRoute.SourceHint, gateway, netProto, multicastLoop, chosenRoute.MTU); r != nil {
 				return r, nil
 			}
 		}
@@ -1625,7 +1755,7 @@ func (s *Stack) AddStaticNeighbor(nicID tcpip.NICID, protocol tcpip.NetworkProto
 }
 
 // RemoveNeighbor removes an IP to MAC address association previously created
-// either automically or by AddStaticNeighbor. Returns ErrBadAddress if there
+// either automatically or by AddStaticNeighbor. Returns ErrBadAddress if there
 // is no association with the provided address.
 func (s *Stack) RemoveNeighbor(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) tcpip.Error {
 	s.mu.RLock()
@@ -1711,16 +1841,26 @@ func (s *Stack) UnregisterRawTransportEndpoint(netProto tcpip.NetworkProtocolNum
 
 // RegisterRestoredEndpoint records e as an endpoint that has been restored on
 // this stack.
-func (s *Stack) RegisterRestoredEndpoint(e ResumableEndpoint) {
+func (s *Stack) RegisterRestoredEndpoint(e RestoredEndpoint) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.restoredEndpoints = append(s.restoredEndpoints, e)
+}
+
+// RegisterResumableEndpoint records e as an endpoint that has to be resumed.
+func (s *Stack) RegisterResumableEndpoint(e ResumableEndpoint) {
 	s.mu.Lock()
+	defer s.mu.Unlock()
+
 	s.resumableEndpoints = append(s.resumableEndpoints, e)
-	s.mu.Unlock()
 }
 
 // RegisteredEndpoints returns all endpoints which are currently registered.
 func (s *Stack) RegisteredEndpoints() []TransportEndpoint {
 	s.mu.Lock()
 	defer s.mu.Unlock()
+
 	var es []TransportEndpoint
 	for _, e := range s.demux.protocol {
 		es = append(es, e.transportEndpoints()...)
@@ -1731,11 +1871,12 @@ func (s *Stack) RegisteredEndpoints() []TransportEndpoint {
 // CleanupEndpoints returns endpoints currently in the cleanup state.
 func (s *Stack) CleanupEndpoints() []TransportEndpoint {
 	s.cleanupEndpointsMu.Lock()
+	defer s.cleanupEndpointsMu.Unlock()
+
 	es := make([]TransportEndpoint, 0, len(s.cleanupEndpoints))
 	for e := range s.cleanupEndpoints {
 		es = append(es, e)
 	}
-	s.cleanupEndpointsMu.Unlock()
 	return es
 }
 
@@ -1743,10 +1884,11 @@ func (s *Stack) CleanupEndpoints() []TransportEndpoint {
 // for restoring a stack after a save.
 func (s *Stack) RestoreCleanupEndpoints(es []TransportEndpoint) {
 	s.cleanupEndpointsMu.Lock()
+	defer s.cleanupEndpointsMu.Unlock()
+
 	for _, e := range es {
 		s.cleanupEndpoints[e] = struct{}{}
 	}
-	s.cleanupEndpointsMu.Unlock()
 }
 
 // Close closes all currently registered transport endpoints.
@@ -1785,14 +1927,22 @@ func (s *Stack) Wait() {
 		p.Wait()
 	}
 
-	s.mu.Lock()
-	defer s.mu.Unlock()
+	deferActs := make([]func(), 0)
 
+	s.mu.Lock()
 	for id, n := range s.nics {
 		// Remove NIC to ensure that qDisc goroutines are correctly
 		// terminated on stack teardown.
-		s.removeNICLocked(id)
+		act, _ := s.removeNICLocked(id)
 		n.NetworkLinkEndpoint.Wait()
+		if act != nil {
+			deferActs = append(deferActs, act)
+		}
+	}
+	s.mu.Unlock()
+
+	for _, act := range deferActs {
+		act()
 	}
 }
 
@@ -1809,17 +1959,32 @@ func (s *Stack) Pause() {
 	}
 }
 
-// Resume restarts the stack after a restore. This must be called after the
+// Restore restarts the stack after a restore. This must be called after the
 // entire system has been restored.
+func (s *Stack) Restore() {
+	// RestoredEndpoint.Restore() may call other methods on s, so we can't hold
+	// s.mu while restoring the endpoints.
+	s.mu.Lock()
+	eps := s.restoredEndpoints
+	s.restoredEndpoints = nil
+	s.mu.Unlock()
+	for _, e := range eps {
+		e.Restore(s)
+	}
+	// Now resume any protocol level background workers.
+	for _, p := range s.transportProtocols {
+		p.proto.Resume()
+	}
+}
+
+// Resume resumes the stack after a save.
 func (s *Stack) Resume() {
-	// ResumableEndpoint.Resume() may call other methods on s, so we can't hold
-	// s.mu while resuming the endpoints.
 	s.mu.Lock()
 	eps := s.resumableEndpoints
 	s.resumableEndpoints = nil
 	s.mu.Unlock()
 	for _, e := range eps {
-		e.Resume(s)
+		e.Resume()
 	}
 	// Now resume any protocol level background workers.
 	for _, p := range s.transportProtocols {
@@ -1838,10 +2003,7 @@ func (s *Stack) RegisterPacketEndpoint(nicID tcpip.NICID, netProto tcpip.Network
 	if nicID == 0 {
 		// Register with each NIC.
 		for _, nic := range s.nics {
-			if err := nic.registerPacketEndpoint(netProto, ep); err != nil {
-				s.unregisterPacketEndpointLocked(0, netProto, ep)
-				return err
-			}
+			nic.registerPacketEndpoint(netProto, ep)
 		}
 		return nil
 	}
@@ -1851,9 +2013,7 @@ func (s *Stack) RegisterPacketEndpoint(nicID tcpip.NICID, netProto tcpip.Network
 	if !ok {
 		return &tcpip.ErrUnknownNICID{}
 	}
-	if err := nic.registerPacketEndpoint(netProto, ep); err != nil {
-		return err
-	}
+	nic.registerPacketEndpoint(netProto, ep)
 
 	return nil
 }
@@ -2096,15 +2256,16 @@ func (s *Stack) Seed() uint32 {
 	return s.seed
 }
 
-// Rand returns a reference to a pseudo random generator that can be used
-// to generate random numbers as required.
-func (s *Stack) Rand() *rand.Rand {
-	return s.randomGenerator
+// InsecureRNG returns a reference to a pseudo random generator that can be used
+// to generate random numbers as required. It is not cryptographically secure
+// and should not be used for security sensitive work.
+func (s *Stack) InsecureRNG() *rand.Rand {
+	return s.insecureRNG
 }
 
 // SecureRNG returns the stack's cryptographically secure random number
 // generator.
-func (s *Stack) SecureRNG() io.Reader {
+func (s *Stack) SecureRNG() cryptorand.RNG {
 	return s.secureRNG
 }
 
@@ -2138,7 +2299,7 @@ const (
 
 // ParsePacketBufferTransport parses the provided packet buffer's transport
 // header.
-func (s *Stack) ParsePacketBufferTransport(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr) ParseResult {
+func (s *Stack) ParsePacketBufferTransport(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer) ParseResult {
 	pkt.TransportProtocolNumber = protocol
 	// Parse the transport header if present.
 	state, ok := s.transportProtocols[protocol]
@@ -2208,3 +2369,33 @@ func (s *Stack) IsSubnetBroadcast(nicID tcpip.NICID, protocol tcpip.NetworkProto
 func (s *Stack) PacketEndpointWriteSupported() bool {
 	return s.packetEndpointWriteSupported
 }
+
+// SetNICStack moves the network device to the specified network namespace.
+func (s *Stack) SetNICStack(id tcpip.NICID, peer *Stack) (tcpip.NICID, tcpip.Error) {
+	s.mu.Lock()
+	nic, ok := s.nics[id]
+	if !ok {
+		s.mu.Unlock()
+		return 0, &tcpip.ErrUnknownNICID{}
+	}
+	if s == peer {
+		s.mu.Unlock()
+		return id, nil
+	}
+	delete(s.nics, id)
+
+	// Remove all routes associated with this NIC.
+	s.RemoveRoutes(func(r tcpip.Route) bool { return r.NIC == id })
+	ne := nic.NetworkLinkEndpoint.(LinkEndpoint)
+	deferAct, err := nic.remove(false /* closeLinkEndpoint */)
+	s.mu.Unlock()
+	if deferAct != nil {
+		deferAct()
+	}
+	if err != nil {
+		return 0, err
+	}
+
+	id = tcpip.NICID(peer.NextNICID())
+	return id, peer.CreateNICWithOptions(id, ne, NICOptions{Name: nic.Name()})
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_mutex.go
index 23e5d098..ef672873 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_mutex.go
@@ -17,7 +17,7 @@ type stackRWMutex struct {
 var stacklockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type stacklockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go
index cb566962..76468e74 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go
@@ -3,6 +3,8 @@
 package stack
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -25,9 +27,117 @@ func (r *addressStateRefs) StateSave(stateSinkObject state.Sink) {
 }
 
 // +checklocksignore
-func (r *addressStateRefs) StateLoad(stateSourceObject state.Source) {
+func (r *addressStateRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.refCount)
-	stateSourceObject.AfterLoad(r.afterLoad)
+	stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) })
+}
+
+func (a *AddressableEndpointState) StateTypeName() string {
+	return "pkg/tcpip/stack.AddressableEndpointState"
+}
+
+func (a *AddressableEndpointState) StateFields() []string {
+	return []string{
+		"networkEndpoint",
+		"options",
+		"endpoints",
+		"primary",
+	}
+}
+
+func (a *AddressableEndpointState) beforeSave() {}
+
+// +checklocksignore
+func (a *AddressableEndpointState) StateSave(stateSinkObject state.Sink) {
+	a.beforeSave()
+	stateSinkObject.Save(0, &a.networkEndpoint)
+	stateSinkObject.Save(1, &a.options)
+	stateSinkObject.Save(2, &a.endpoints)
+	stateSinkObject.Save(3, &a.primary)
+}
+
+func (a *AddressableEndpointState) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (a *AddressableEndpointState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &a.networkEndpoint)
+	stateSourceObject.Load(1, &a.options)
+	stateSourceObject.Load(2, &a.endpoints)
+	stateSourceObject.Load(3, &a.primary)
+}
+
+func (a *AddressableEndpointStateOptions) StateTypeName() string {
+	return "pkg/tcpip/stack.AddressableEndpointStateOptions"
+}
+
+func (a *AddressableEndpointStateOptions) StateFields() []string {
+	return []string{
+		"HiddenWhileDisabled",
+	}
+}
+
+func (a *AddressableEndpointStateOptions) beforeSave() {}
+
+// +checklocksignore
+func (a *AddressableEndpointStateOptions) StateSave(stateSinkObject state.Sink) {
+	a.beforeSave()
+	stateSinkObject.Save(0, &a.HiddenWhileDisabled)
+}
+
+func (a *AddressableEndpointStateOptions) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (a *AddressableEndpointStateOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &a.HiddenWhileDisabled)
+}
+
+func (a *addressState) StateTypeName() string {
+	return "pkg/tcpip/stack.addressState"
+}
+
+func (a *addressState) StateFields() []string {
+	return []string{
+		"addressableEndpointState",
+		"addr",
+		"subnet",
+		"temporary",
+		"refs",
+		"kind",
+		"configType",
+		"lifetimes",
+		"disp",
+	}
+}
+
+func (a *addressState) beforeSave() {}
+
+// +checklocksignore
+func (a *addressState) StateSave(stateSinkObject state.Sink) {
+	a.beforeSave()
+	stateSinkObject.Save(0, &a.addressableEndpointState)
+	stateSinkObject.Save(1, &a.addr)
+	stateSinkObject.Save(2, &a.subnet)
+	stateSinkObject.Save(3, &a.temporary)
+	stateSinkObject.Save(4, &a.refs)
+	stateSinkObject.Save(5, &a.kind)
+	stateSinkObject.Save(6, &a.configType)
+	stateSinkObject.Save(7, &a.lifetimes)
+	stateSinkObject.Save(8, &a.disp)
+}
+
+func (a *addressState) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (a *addressState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &a.addressableEndpointState)
+	stateSourceObject.Load(1, &a.addr)
+	stateSourceObject.Load(2, &a.subnet)
+	stateSourceObject.Load(3, &a.temporary)
+	stateSourceObject.Load(4, &a.refs)
+	stateSourceObject.Load(5, &a.kind)
+	stateSourceObject.Load(6, &a.configType)
+	stateSourceObject.Load(7, &a.lifetimes)
+	stateSourceObject.Load(8, &a.disp)
 }
 
 func (t *tuple) StateTypeName() string {
@@ -54,10 +164,10 @@ func (t *tuple) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(3, &t.tupleID)
 }
 
-func (t *tuple) afterLoad() {}
+func (t *tuple) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *tuple) StateLoad(stateSourceObject state.Source) {
+func (t *tuple) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.tupleEntry)
 	stateSourceObject.Load(1, &t.conn)
 	stateSourceObject.Load(2, &t.reply)
@@ -92,10 +202,10 @@ func (ti *tupleID) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(5, &ti.netProto)
 }
 
-func (ti *tupleID) afterLoad() {}
+func (ti *tupleID) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (ti *tupleID) StateLoad(stateSourceObject state.Source) {
+func (ti *tupleID) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &ti.srcAddr)
 	stateSourceObject.Load(1, &ti.srcPortOrEchoRequestIdent)
 	stateSourceObject.Load(2, &ti.dstAddr)
@@ -113,7 +223,6 @@ func (cn *conn) StateFields() []string {
 		"ct",
 		"original",
 		"reply",
-		"finalizeOnce",
 		"finalizeResult",
 		"sourceManip",
 		"destinationManip",
@@ -130,27 +239,25 @@ func (cn *conn) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &cn.ct)
 	stateSinkObject.Save(1, &cn.original)
 	stateSinkObject.Save(2, &cn.reply)
-	stateSinkObject.Save(3, &cn.finalizeOnce)
-	stateSinkObject.Save(4, &cn.finalizeResult)
-	stateSinkObject.Save(5, &cn.sourceManip)
-	stateSinkObject.Save(6, &cn.destinationManip)
-	stateSinkObject.Save(7, &cn.tcb)
-	stateSinkObject.Save(8, &cn.lastUsed)
+	stateSinkObject.Save(3, &cn.finalizeResult)
+	stateSinkObject.Save(4, &cn.sourceManip)
+	stateSinkObject.Save(5, &cn.destinationManip)
+	stateSinkObject.Save(6, &cn.tcb)
+	stateSinkObject.Save(7, &cn.lastUsed)
 }
 
-func (cn *conn) afterLoad() {}
+func (cn *conn) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (cn *conn) StateLoad(stateSourceObject state.Source) {
+func (cn *conn) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &cn.ct)
 	stateSourceObject.Load(1, &cn.original)
 	stateSourceObject.Load(2, &cn.reply)
-	stateSourceObject.Load(3, &cn.finalizeOnce)
-	stateSourceObject.Load(4, &cn.finalizeResult)
-	stateSourceObject.Load(5, &cn.sourceManip)
-	stateSourceObject.Load(6, &cn.destinationManip)
-	stateSourceObject.Load(7, &cn.tcb)
-	stateSourceObject.Load(8, &cn.lastUsed)
+	stateSourceObject.Load(3, &cn.finalizeResult)
+	stateSourceObject.Load(4, &cn.sourceManip)
+	stateSourceObject.Load(5, &cn.destinationManip)
+	stateSourceObject.Load(6, &cn.tcb)
+	stateSourceObject.Load(7, &cn.lastUsed)
 }
 
 func (ct *ConnTrack) StateTypeName() string {
@@ -161,7 +268,6 @@ func (ct *ConnTrack) StateFields() []string {
 	return []string{
 		"seed",
 		"clock",
-		"rand",
 		"buckets",
 	}
 }
@@ -173,18 +279,16 @@ func (ct *ConnTrack) StateSave(stateSinkObject state.Sink) {
 	ct.beforeSave()
 	stateSinkObject.Save(0, &ct.seed)
 	stateSinkObject.Save(1, &ct.clock)
-	stateSinkObject.Save(2, &ct.rand)
-	stateSinkObject.Save(3, &ct.buckets)
+	stateSinkObject.Save(2, &ct.buckets)
 }
 
-func (ct *ConnTrack) afterLoad() {}
+func (ct *ConnTrack) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (ct *ConnTrack) StateLoad(stateSourceObject state.Source) {
+func (ct *ConnTrack) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &ct.seed)
 	stateSourceObject.Load(1, &ct.clock)
-	stateSourceObject.Load(2, &ct.rand)
-	stateSourceObject.Load(3, &ct.buckets)
+	stateSourceObject.Load(2, &ct.buckets)
 }
 
 func (bkt *bucket) StateTypeName() string {
@@ -205,67 +309,347 @@ func (bkt *bucket) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &bkt.tuples)
 }
 
-func (bkt *bucket) afterLoad() {}
+func (bkt *bucket) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (bkt *bucket) StateLoad(stateSourceObject state.Source) {
+func (bkt *bucket) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &bkt.tuples)
 }
 
-func (l *groPacketList) StateTypeName() string {
-	return "pkg/tcpip/stack.groPacketList"
+func (l *ICMPRateLimiter) StateTypeName() string {
+	return "pkg/tcpip/stack.ICMPRateLimiter"
 }
 
-func (l *groPacketList) StateFields() []string {
+func (l *ICMPRateLimiter) StateFields() []string {
 	return []string{
-		"head",
-		"tail",
+		"clock",
 	}
 }
 
-func (l *groPacketList) beforeSave() {}
+func (l *ICMPRateLimiter) beforeSave() {}
 
 // +checklocksignore
-func (l *groPacketList) StateSave(stateSinkObject state.Sink) {
+func (l *ICMPRateLimiter) StateSave(stateSinkObject state.Sink) {
 	l.beforeSave()
-	stateSinkObject.Save(0, &l.head)
-	stateSinkObject.Save(1, &l.tail)
+	stateSinkObject.Save(0, &l.clock)
 }
 
-func (l *groPacketList) afterLoad() {}
+func (l *ICMPRateLimiter) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *groPacketList) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &l.head)
-	stateSourceObject.Load(1, &l.tail)
+func (l *ICMPRateLimiter) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &l.clock)
 }
 
-func (e *groPacketEntry) StateTypeName() string {
-	return "pkg/tcpip/stack.groPacketEntry"
+func (a *AcceptTarget) StateTypeName() string {
+	return "pkg/tcpip/stack.AcceptTarget"
 }
 
-func (e *groPacketEntry) StateFields() []string {
+func (a *AcceptTarget) StateFields() []string {
 	return []string{
-		"next",
-		"prev",
+		"NetworkProtocol",
+	}
+}
+
+func (a *AcceptTarget) beforeSave() {}
+
+// +checklocksignore
+func (a *AcceptTarget) StateSave(stateSinkObject state.Sink) {
+	a.beforeSave()
+	stateSinkObject.Save(0, &a.NetworkProtocol)
+}
+
+func (a *AcceptTarget) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (a *AcceptTarget) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &a.NetworkProtocol)
+}
+
+func (d *DropTarget) StateTypeName() string {
+	return "pkg/tcpip/stack.DropTarget"
+}
+
+func (d *DropTarget) StateFields() []string {
+	return []string{
+		"NetworkProtocol",
+	}
+}
+
+func (d *DropTarget) beforeSave() {}
+
+// +checklocksignore
+func (d *DropTarget) StateSave(stateSinkObject state.Sink) {
+	d.beforeSave()
+	stateSinkObject.Save(0, &d.NetworkProtocol)
+}
+
+func (d *DropTarget) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (d *DropTarget) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &d.NetworkProtocol)
+}
+
+func (rt *RejectIPv4Target) StateTypeName() string {
+	return "pkg/tcpip/stack.RejectIPv4Target"
+}
+
+func (rt *RejectIPv4Target) StateFields() []string {
+	return []string{
+		"Handler",
+		"RejectWith",
 	}
 }
 
-func (e *groPacketEntry) beforeSave() {}
+func (rt *RejectIPv4Target) beforeSave() {}
 
 // +checklocksignore
-func (e *groPacketEntry) StateSave(stateSinkObject state.Sink) {
+func (rt *RejectIPv4Target) StateSave(stateSinkObject state.Sink) {
+	rt.beforeSave()
+	stateSinkObject.Save(0, &rt.Handler)
+	stateSinkObject.Save(1, &rt.RejectWith)
+}
+
+func (rt *RejectIPv4Target) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (rt *RejectIPv4Target) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &rt.Handler)
+	stateSourceObject.Load(1, &rt.RejectWith)
+}
+
+func (rt *RejectIPv6Target) StateTypeName() string {
+	return "pkg/tcpip/stack.RejectIPv6Target"
+}
+
+func (rt *RejectIPv6Target) StateFields() []string {
+	return []string{
+		"Handler",
+		"RejectWith",
+	}
+}
+
+func (rt *RejectIPv6Target) beforeSave() {}
+
+// +checklocksignore
+func (rt *RejectIPv6Target) StateSave(stateSinkObject state.Sink) {
+	rt.beforeSave()
+	stateSinkObject.Save(0, &rt.Handler)
+	stateSinkObject.Save(1, &rt.RejectWith)
+}
+
+func (rt *RejectIPv6Target) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (rt *RejectIPv6Target) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &rt.Handler)
+	stateSourceObject.Load(1, &rt.RejectWith)
+}
+
+func (e *ErrorTarget) StateTypeName() string {
+	return "pkg/tcpip/stack.ErrorTarget"
+}
+
+func (e *ErrorTarget) StateFields() []string {
+	return []string{
+		"NetworkProtocol",
+	}
+}
+
+func (e *ErrorTarget) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrorTarget) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
-	stateSinkObject.Save(0, &e.next)
-	stateSinkObject.Save(1, &e.prev)
+	stateSinkObject.Save(0, &e.NetworkProtocol)
 }
 
-func (e *groPacketEntry) afterLoad() {}
+func (e *ErrorTarget) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *groPacketEntry) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &e.next)
-	stateSourceObject.Load(1, &e.prev)
+func (e *ErrorTarget) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &e.NetworkProtocol)
+}
+
+func (u *UserChainTarget) StateTypeName() string {
+	return "pkg/tcpip/stack.UserChainTarget"
+}
+
+func (u *UserChainTarget) StateFields() []string {
+	return []string{
+		"Name",
+		"NetworkProtocol",
+	}
+}
+
+func (u *UserChainTarget) beforeSave() {}
+
+// +checklocksignore
+func (u *UserChainTarget) StateSave(stateSinkObject state.Sink) {
+	u.beforeSave()
+	stateSinkObject.Save(0, &u.Name)
+	stateSinkObject.Save(1, &u.NetworkProtocol)
+}
+
+func (u *UserChainTarget) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (u *UserChainTarget) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &u.Name)
+	stateSourceObject.Load(1, &u.NetworkProtocol)
+}
+
+func (r *ReturnTarget) StateTypeName() string {
+	return "pkg/tcpip/stack.ReturnTarget"
+}
+
+func (r *ReturnTarget) StateFields() []string {
+	return []string{
+		"NetworkProtocol",
+	}
+}
+
+func (r *ReturnTarget) beforeSave() {}
+
+// +checklocksignore
+func (r *ReturnTarget) StateSave(stateSinkObject state.Sink) {
+	r.beforeSave()
+	stateSinkObject.Save(0, &r.NetworkProtocol)
+}
+
+func (r *ReturnTarget) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (r *ReturnTarget) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &r.NetworkProtocol)
+}
+
+func (rt *DNATTarget) StateTypeName() string {
+	return "pkg/tcpip/stack.DNATTarget"
+}
+
+func (rt *DNATTarget) StateFields() []string {
+	return []string{
+		"Addr",
+		"Port",
+		"NetworkProtocol",
+		"ChangeAddress",
+		"ChangePort",
+	}
+}
+
+func (rt *DNATTarget) beforeSave() {}
+
+// +checklocksignore
+func (rt *DNATTarget) StateSave(stateSinkObject state.Sink) {
+	rt.beforeSave()
+	stateSinkObject.Save(0, &rt.Addr)
+	stateSinkObject.Save(1, &rt.Port)
+	stateSinkObject.Save(2, &rt.NetworkProtocol)
+	stateSinkObject.Save(3, &rt.ChangeAddress)
+	stateSinkObject.Save(4, &rt.ChangePort)
+}
+
+func (rt *DNATTarget) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (rt *DNATTarget) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &rt.Addr)
+	stateSourceObject.Load(1, &rt.Port)
+	stateSourceObject.Load(2, &rt.NetworkProtocol)
+	stateSourceObject.Load(3, &rt.ChangeAddress)
+	stateSourceObject.Load(4, &rt.ChangePort)
+}
+
+func (rt *RedirectTarget) StateTypeName() string {
+	return "pkg/tcpip/stack.RedirectTarget"
+}
+
+func (rt *RedirectTarget) StateFields() []string {
+	return []string{
+		"Port",
+		"NetworkProtocol",
+	}
+}
+
+func (rt *RedirectTarget) beforeSave() {}
+
+// +checklocksignore
+func (rt *RedirectTarget) StateSave(stateSinkObject state.Sink) {
+	rt.beforeSave()
+	stateSinkObject.Save(0, &rt.Port)
+	stateSinkObject.Save(1, &rt.NetworkProtocol)
+}
+
+func (rt *RedirectTarget) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (rt *RedirectTarget) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &rt.Port)
+	stateSourceObject.Load(1, &rt.NetworkProtocol)
+}
+
+func (st *SNATTarget) StateTypeName() string {
+	return "pkg/tcpip/stack.SNATTarget"
+}
+
+func (st *SNATTarget) StateFields() []string {
+	return []string{
+		"Addr",
+		"Port",
+		"NetworkProtocol",
+		"ChangeAddress",
+		"ChangePort",
+	}
+}
+
+func (st *SNATTarget) beforeSave() {}
+
+// +checklocksignore
+func (st *SNATTarget) StateSave(stateSinkObject state.Sink) {
+	st.beforeSave()
+	stateSinkObject.Save(0, &st.Addr)
+	stateSinkObject.Save(1, &st.Port)
+	stateSinkObject.Save(2, &st.NetworkProtocol)
+	stateSinkObject.Save(3, &st.ChangeAddress)
+	stateSinkObject.Save(4, &st.ChangePort)
+}
+
+func (st *SNATTarget) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (st *SNATTarget) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &st.Addr)
+	stateSourceObject.Load(1, &st.Port)
+	stateSourceObject.Load(2, &st.NetworkProtocol)
+	stateSourceObject.Load(3, &st.ChangeAddress)
+	stateSourceObject.Load(4, &st.ChangePort)
+}
+
+func (mt *MasqueradeTarget) StateTypeName() string {
+	return "pkg/tcpip/stack.MasqueradeTarget"
+}
+
+func (mt *MasqueradeTarget) StateFields() []string {
+	return []string{
+		"NetworkProtocol",
+	}
+}
+
+func (mt *MasqueradeTarget) beforeSave() {}
+
+// +checklocksignore
+func (mt *MasqueradeTarget) StateSave(stateSinkObject state.Sink) {
+	mt.beforeSave()
+	stateSinkObject.Save(0, &mt.NetworkProtocol)
+}
+
+func (mt *MasqueradeTarget) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (mt *MasqueradeTarget) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &mt.NetworkProtocol)
 }
 
 func (it *IPTables) StateTypeName() string {
@@ -276,7 +660,6 @@ func (it *IPTables) StateFields() []string {
 	return []string{
 		"connections",
 		"reaper",
-		"mu",
 		"v4Tables",
 		"v6Tables",
 		"modified",
@@ -288,21 +671,19 @@ func (it *IPTables) StateSave(stateSinkObject state.Sink) {
 	it.beforeSave()
 	stateSinkObject.Save(0, &it.connections)
 	stateSinkObject.Save(1, &it.reaper)
-	stateSinkObject.Save(2, &it.mu)
-	stateSinkObject.Save(3, &it.v4Tables)
-	stateSinkObject.Save(4, &it.v6Tables)
-	stateSinkObject.Save(5, &it.modified)
+	stateSinkObject.Save(2, &it.v4Tables)
+	stateSinkObject.Save(3, &it.v6Tables)
+	stateSinkObject.Save(4, &it.modified)
 }
 
 // +checklocksignore
-func (it *IPTables) StateLoad(stateSourceObject state.Source) {
+func (it *IPTables) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &it.connections)
 	stateSourceObject.Load(1, &it.reaper)
-	stateSourceObject.Load(2, &it.mu)
-	stateSourceObject.Load(3, &it.v4Tables)
-	stateSourceObject.Load(4, &it.v6Tables)
-	stateSourceObject.Load(5, &it.modified)
-	stateSourceObject.AfterLoad(it.afterLoad)
+	stateSourceObject.Load(2, &it.v4Tables)
+	stateSourceObject.Load(3, &it.v6Tables)
+	stateSourceObject.Load(4, &it.modified)
+	stateSourceObject.AfterLoad(func() { it.afterLoad(ctx) })
 }
 
 func (table *Table) StateTypeName() string {
@@ -327,10 +708,10 @@ func (table *Table) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &table.Underflows)
 }
 
-func (table *Table) afterLoad() {}
+func (table *Table) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (table *Table) StateLoad(stateSourceObject state.Source) {
+func (table *Table) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &table.Rules)
 	stateSourceObject.Load(1, &table.BuiltinChains)
 	stateSourceObject.Load(2, &table.Underflows)
@@ -358,140 +739,640 @@ func (r *Rule) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &r.Target)
 }
 
-func (r *Rule) afterLoad() {}
+func (r *Rule) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (r *Rule) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &r.Filter)
+	stateSourceObject.Load(1, &r.Matchers)
+	stateSourceObject.Load(2, &r.Target)
+}
+
+func (fl *IPHeaderFilter) StateTypeName() string {
+	return "pkg/tcpip/stack.IPHeaderFilter"
+}
+
+func (fl *IPHeaderFilter) StateFields() []string {
+	return []string{
+		"Protocol",
+		"CheckProtocol",
+		"Dst",
+		"DstMask",
+		"DstInvert",
+		"Src",
+		"SrcMask",
+		"SrcInvert",
+		"InputInterface",
+		"InputInterfaceMask",
+		"InputInterfaceInvert",
+		"OutputInterface",
+		"OutputInterfaceMask",
+		"OutputInterfaceInvert",
+	}
+}
+
+func (fl *IPHeaderFilter) beforeSave() {}
+
+// +checklocksignore
+func (fl *IPHeaderFilter) StateSave(stateSinkObject state.Sink) {
+	fl.beforeSave()
+	stateSinkObject.Save(0, &fl.Protocol)
+	stateSinkObject.Save(1, &fl.CheckProtocol)
+	stateSinkObject.Save(2, &fl.Dst)
+	stateSinkObject.Save(3, &fl.DstMask)
+	stateSinkObject.Save(4, &fl.DstInvert)
+	stateSinkObject.Save(5, &fl.Src)
+	stateSinkObject.Save(6, &fl.SrcMask)
+	stateSinkObject.Save(7, &fl.SrcInvert)
+	stateSinkObject.Save(8, &fl.InputInterface)
+	stateSinkObject.Save(9, &fl.InputInterfaceMask)
+	stateSinkObject.Save(10, &fl.InputInterfaceInvert)
+	stateSinkObject.Save(11, &fl.OutputInterface)
+	stateSinkObject.Save(12, &fl.OutputInterfaceMask)
+	stateSinkObject.Save(13, &fl.OutputInterfaceInvert)
+}
+
+func (fl *IPHeaderFilter) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (fl *IPHeaderFilter) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &fl.Protocol)
+	stateSourceObject.Load(1, &fl.CheckProtocol)
+	stateSourceObject.Load(2, &fl.Dst)
+	stateSourceObject.Load(3, &fl.DstMask)
+	stateSourceObject.Load(4, &fl.DstInvert)
+	stateSourceObject.Load(5, &fl.Src)
+	stateSourceObject.Load(6, &fl.SrcMask)
+	stateSourceObject.Load(7, &fl.SrcInvert)
+	stateSourceObject.Load(8, &fl.InputInterface)
+	stateSourceObject.Load(9, &fl.InputInterfaceMask)
+	stateSourceObject.Load(10, &fl.InputInterfaceInvert)
+	stateSourceObject.Load(11, &fl.OutputInterface)
+	stateSourceObject.Load(12, &fl.OutputInterfaceMask)
+	stateSourceObject.Load(13, &fl.OutputInterfaceInvert)
+}
+
+func (d *dynamicCacheEntry) StateTypeName() string {
+	return "pkg/tcpip/stack.dynamicCacheEntry"
+}
+
+func (d *dynamicCacheEntry) StateFields() []string {
+	return []string{
+		"lru",
+		"count",
+	}
+}
+
+func (d *dynamicCacheEntry) beforeSave() {}
+
+// +checklocksignore
+func (d *dynamicCacheEntry) StateSave(stateSinkObject state.Sink) {
+	d.beforeSave()
+	stateSinkObject.Save(0, &d.lru)
+	stateSinkObject.Save(1, &d.count)
+}
+
+func (d *dynamicCacheEntry) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (d *dynamicCacheEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &d.lru)
+	stateSourceObject.Load(1, &d.count)
+}
+
+func (n *neighborCacheMu) StateTypeName() string {
+	return "pkg/tcpip/stack.neighborCacheMu"
+}
+
+func (n *neighborCacheMu) StateFields() []string {
+	return []string{
+		"cache",
+		"dynamic",
+	}
+}
+
+func (n *neighborCacheMu) beforeSave() {}
+
+// +checklocksignore
+func (n *neighborCacheMu) StateSave(stateSinkObject state.Sink) {
+	n.beforeSave()
+	stateSinkObject.Save(0, &n.cache)
+	stateSinkObject.Save(1, &n.dynamic)
+}
+
+func (n *neighborCacheMu) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (n *neighborCacheMu) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &n.cache)
+	stateSourceObject.Load(1, &n.dynamic)
+}
+
+func (n *neighborCache) StateTypeName() string {
+	return "pkg/tcpip/stack.neighborCache"
+}
+
+func (n *neighborCache) StateFields() []string {
+	return []string{
+		"nic",
+		"state",
+		"linkRes",
+		"mu",
+	}
+}
+
+func (n *neighborCache) beforeSave() {}
+
+// +checklocksignore
+func (n *neighborCache) StateSave(stateSinkObject state.Sink) {
+	n.beforeSave()
+	stateSinkObject.Save(0, &n.nic)
+	stateSinkObject.Save(1, &n.state)
+	stateSinkObject.Save(2, &n.linkRes)
+	stateSinkObject.Save(3, &n.mu)
+}
+
+func (n *neighborCache) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (n *neighborCache) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &n.nic)
+	stateSourceObject.Load(1, &n.state)
+	stateSourceObject.Load(2, &n.linkRes)
+	stateSourceObject.Load(3, &n.mu)
+}
+
+func (l *neighborEntryList) StateTypeName() string {
+	return "pkg/tcpip/stack.neighborEntryList"
+}
+
+func (l *neighborEntryList) StateFields() []string {
+	return []string{
+		"head",
+		"tail",
+	}
+}
+
+func (l *neighborEntryList) beforeSave() {}
+
+// +checklocksignore
+func (l *neighborEntryList) StateSave(stateSinkObject state.Sink) {
+	l.beforeSave()
+	stateSinkObject.Save(0, &l.head)
+	stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *neighborEntryList) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (l *neighborEntryList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &l.head)
+	stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *neighborEntryEntry) StateTypeName() string {
+	return "pkg/tcpip/stack.neighborEntryEntry"
+}
+
+func (e *neighborEntryEntry) StateFields() []string {
+	return []string{
+		"next",
+		"prev",
+	}
+}
+
+func (e *neighborEntryEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *neighborEntryEntry) StateSave(stateSinkObject state.Sink) {
+	e.beforeSave()
+	stateSinkObject.Save(0, &e.next)
+	stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *neighborEntryEntry) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (e *neighborEntryEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &e.next)
+	stateSourceObject.Load(1, &e.prev)
+}
+
+func (l *linkResolver) StateTypeName() string {
+	return "pkg/tcpip/stack.linkResolver"
+}
+
+func (l *linkResolver) StateFields() []string {
+	return []string{
+		"resolver",
+		"neigh",
+	}
+}
+
+func (l *linkResolver) beforeSave() {}
+
+// +checklocksignore
+func (l *linkResolver) StateSave(stateSinkObject state.Sink) {
+	l.beforeSave()
+	stateSinkObject.Save(0, &l.resolver)
+	stateSinkObject.Save(1, &l.neigh)
+}
+
+func (l *linkResolver) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (l *linkResolver) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &l.resolver)
+	stateSourceObject.Load(1, &l.neigh)
+}
+
+func (n *nic) StateTypeName() string {
+	return "pkg/tcpip/stack.nic"
+}
+
+func (n *nic) StateFields() []string {
+	return []string{
+		"NetworkLinkEndpoint",
+		"stack",
+		"id",
+		"name",
+		"context",
+		"stats",
+		"networkEndpoints",
+		"linkAddrResolvers",
+		"duplicateAddressDetectors",
+		"enabled",
+		"spoofing",
+		"promiscuous",
+		"linkResQueue",
+		"packetEPs",
+		"qDisc",
+		"deliverLinkPackets",
+		"Primary",
+	}
+}
+
+func (n *nic) beforeSave() {}
+
+// +checklocksignore
+func (n *nic) StateSave(stateSinkObject state.Sink) {
+	n.beforeSave()
+	stateSinkObject.Save(0, &n.NetworkLinkEndpoint)
+	stateSinkObject.Save(1, &n.stack)
+	stateSinkObject.Save(2, &n.id)
+	stateSinkObject.Save(3, &n.name)
+	stateSinkObject.Save(4, &n.context)
+	stateSinkObject.Save(5, &n.stats)
+	stateSinkObject.Save(6, &n.networkEndpoints)
+	stateSinkObject.Save(7, &n.linkAddrResolvers)
+	stateSinkObject.Save(8, &n.duplicateAddressDetectors)
+	stateSinkObject.Save(9, &n.enabled)
+	stateSinkObject.Save(10, &n.spoofing)
+	stateSinkObject.Save(11, &n.promiscuous)
+	stateSinkObject.Save(12, &n.linkResQueue)
+	stateSinkObject.Save(13, &n.packetEPs)
+	stateSinkObject.Save(14, &n.qDisc)
+	stateSinkObject.Save(15, &n.deliverLinkPackets)
+	stateSinkObject.Save(16, &n.Primary)
+}
+
+func (n *nic) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (n *nic) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &n.NetworkLinkEndpoint)
+	stateSourceObject.Load(1, &n.stack)
+	stateSourceObject.Load(2, &n.id)
+	stateSourceObject.Load(3, &n.name)
+	stateSourceObject.Load(4, &n.context)
+	stateSourceObject.Load(5, &n.stats)
+	stateSourceObject.Load(6, &n.networkEndpoints)
+	stateSourceObject.Load(7, &n.linkAddrResolvers)
+	stateSourceObject.Load(8, &n.duplicateAddressDetectors)
+	stateSourceObject.Load(9, &n.enabled)
+	stateSourceObject.Load(10, &n.spoofing)
+	stateSourceObject.Load(11, &n.promiscuous)
+	stateSourceObject.Load(12, &n.linkResQueue)
+	stateSourceObject.Load(13, &n.packetEPs)
+	stateSourceObject.Load(14, &n.qDisc)
+	stateSourceObject.Load(15, &n.deliverLinkPackets)
+	stateSourceObject.Load(16, &n.Primary)
+}
+
+func (p *packetEndpointList) StateTypeName() string {
+	return "pkg/tcpip/stack.packetEndpointList"
+}
+
+func (p *packetEndpointList) StateFields() []string {
+	return []string{
+		"mu",
+		"eps",
+	}
+}
+
+func (p *packetEndpointList) beforeSave() {}
+
+// +checklocksignore
+func (p *packetEndpointList) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.mu)
+	stateSinkObject.Save(1, &p.eps)
+}
+
+func (p *packetEndpointList) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *packetEndpointList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.mu)
+	stateSourceObject.Load(1, &p.eps)
+}
+
+func (qDisc *delegatingQueueingDiscipline) StateTypeName() string {
+	return "pkg/tcpip/stack.delegatingQueueingDiscipline"
+}
+
+func (qDisc *delegatingQueueingDiscipline) StateFields() []string {
+	return []string{
+		"LinkWriter",
+	}
+}
+
+func (qDisc *delegatingQueueingDiscipline) beforeSave() {}
+
+// +checklocksignore
+func (qDisc *delegatingQueueingDiscipline) StateSave(stateSinkObject state.Sink) {
+	qDisc.beforeSave()
+	stateSinkObject.Save(0, &qDisc.LinkWriter)
+}
+
+func (qDisc *delegatingQueueingDiscipline) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (qDisc *delegatingQueueingDiscipline) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &qDisc.LinkWriter)
+}
+
+func (s *sharedStats) StateTypeName() string {
+	return "pkg/tcpip/stack.sharedStats"
+}
+
+func (s *sharedStats) StateFields() []string {
+	return []string{
+		"local",
+		"multiCounterNICStats",
+	}
+}
+
+func (s *sharedStats) beforeSave() {}
+
+// +checklocksignore
+func (s *sharedStats) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.local)
+	stateSinkObject.Save(1, &s.multiCounterNICStats)
+}
+
+func (s *sharedStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *sharedStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.local)
+	stateSourceObject.Load(1, &s.multiCounterNICStats)
+}
+
+func (m *multiCounterNICPacketStats) StateTypeName() string {
+	return "pkg/tcpip/stack.multiCounterNICPacketStats"
+}
+
+func (m *multiCounterNICPacketStats) StateFields() []string {
+	return []string{
+		"packets",
+		"bytes",
+	}
+}
+
+func (m *multiCounterNICPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterNICPacketStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.packets)
+	stateSinkObject.Save(1, &m.bytes)
+}
+
+func (m *multiCounterNICPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterNICPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.packets)
+	stateSourceObject.Load(1, &m.bytes)
+}
+
+func (m *multiCounterNICNeighborStats) StateTypeName() string {
+	return "pkg/tcpip/stack.multiCounterNICNeighborStats"
+}
+
+func (m *multiCounterNICNeighborStats) StateFields() []string {
+	return []string{
+		"unreachableEntryLookups",
+		"droppedConfirmationForNoninitiatedNeighbor",
+		"droppedInvalidLinkAddressConfirmations",
+	}
+}
+
+func (m *multiCounterNICNeighborStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterNICNeighborStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.unreachableEntryLookups)
+	stateSinkObject.Save(1, &m.droppedConfirmationForNoninitiatedNeighbor)
+	stateSinkObject.Save(2, &m.droppedInvalidLinkAddressConfirmations)
+}
+
+func (m *multiCounterNICNeighborStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *multiCounterNICNeighborStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.unreachableEntryLookups)
+	stateSourceObject.Load(1, &m.droppedConfirmationForNoninitiatedNeighbor)
+	stateSourceObject.Load(2, &m.droppedInvalidLinkAddressConfirmations)
+}
+
+func (m *multiCounterNICStats) StateTypeName() string {
+	return "pkg/tcpip/stack.multiCounterNICStats"
+}
+
+func (m *multiCounterNICStats) StateFields() []string {
+	return []string{
+		"unknownL3ProtocolRcvdPacketCounts",
+		"unknownL4ProtocolRcvdPacketCounts",
+		"malformedL4RcvdPackets",
+		"tx",
+		"txPacketsDroppedNoBufferSpace",
+		"rx",
+		"disabledRx",
+		"neighbor",
+	}
+}
+
+func (m *multiCounterNICStats) beforeSave() {}
+
+// +checklocksignore
+func (m *multiCounterNICStats) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.unknownL3ProtocolRcvdPacketCounts)
+	stateSinkObject.Save(1, &m.unknownL4ProtocolRcvdPacketCounts)
+	stateSinkObject.Save(2, &m.malformedL4RcvdPackets)
+	stateSinkObject.Save(3, &m.tx)
+	stateSinkObject.Save(4, &m.txPacketsDroppedNoBufferSpace)
+	stateSinkObject.Save(5, &m.rx)
+	stateSinkObject.Save(6, &m.disabledRx)
+	stateSinkObject.Save(7, &m.neighbor)
+}
+
+func (m *multiCounterNICStats) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *Rule) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &r.Filter)
-	stateSourceObject.Load(1, &r.Matchers)
-	stateSourceObject.Load(2, &r.Target)
+func (m *multiCounterNICStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.unknownL3ProtocolRcvdPacketCounts)
+	stateSourceObject.Load(1, &m.unknownL4ProtocolRcvdPacketCounts)
+	stateSourceObject.Load(2, &m.malformedL4RcvdPackets)
+	stateSourceObject.Load(3, &m.tx)
+	stateSourceObject.Load(4, &m.txPacketsDroppedNoBufferSpace)
+	stateSourceObject.Load(5, &m.rx)
+	stateSourceObject.Load(6, &m.disabledRx)
+	stateSourceObject.Load(7, &m.neighbor)
 }
 
-func (fl *IPHeaderFilter) StateTypeName() string {
-	return "pkg/tcpip/stack.IPHeaderFilter"
+func (c *NUDConfigurations) StateTypeName() string {
+	return "pkg/tcpip/stack.NUDConfigurations"
 }
 
-func (fl *IPHeaderFilter) StateFields() []string {
+func (c *NUDConfigurations) StateFields() []string {
 	return []string{
-		"Protocol",
-		"CheckProtocol",
-		"Dst",
-		"DstMask",
-		"DstInvert",
-		"Src",
-		"SrcMask",
-		"SrcInvert",
-		"InputInterface",
-		"InputInterfaceMask",
-		"InputInterfaceInvert",
-		"OutputInterface",
-		"OutputInterfaceMask",
-		"OutputInterfaceInvert",
+		"BaseReachableTime",
+		"LearnBaseReachableTime",
+		"MinRandomFactor",
+		"MaxRandomFactor",
+		"RetransmitTimer",
+		"LearnRetransmitTimer",
+		"DelayFirstProbeTime",
+		"MaxMulticastProbes",
+		"MaxUnicastProbes",
+		"MaxAnycastDelayTime",
+		"MaxReachabilityConfirmations",
 	}
 }
 
-func (fl *IPHeaderFilter) beforeSave() {}
+func (c *NUDConfigurations) beforeSave() {}
 
 // +checklocksignore
-func (fl *IPHeaderFilter) StateSave(stateSinkObject state.Sink) {
-	fl.beforeSave()
-	stateSinkObject.Save(0, &fl.Protocol)
-	stateSinkObject.Save(1, &fl.CheckProtocol)
-	stateSinkObject.Save(2, &fl.Dst)
-	stateSinkObject.Save(3, &fl.DstMask)
-	stateSinkObject.Save(4, &fl.DstInvert)
-	stateSinkObject.Save(5, &fl.Src)
-	stateSinkObject.Save(6, &fl.SrcMask)
-	stateSinkObject.Save(7, &fl.SrcInvert)
-	stateSinkObject.Save(8, &fl.InputInterface)
-	stateSinkObject.Save(9, &fl.InputInterfaceMask)
-	stateSinkObject.Save(10, &fl.InputInterfaceInvert)
-	stateSinkObject.Save(11, &fl.OutputInterface)
-	stateSinkObject.Save(12, &fl.OutputInterfaceMask)
-	stateSinkObject.Save(13, &fl.OutputInterfaceInvert)
+func (c *NUDConfigurations) StateSave(stateSinkObject state.Sink) {
+	c.beforeSave()
+	stateSinkObject.Save(0, &c.BaseReachableTime)
+	stateSinkObject.Save(1, &c.LearnBaseReachableTime)
+	stateSinkObject.Save(2, &c.MinRandomFactor)
+	stateSinkObject.Save(3, &c.MaxRandomFactor)
+	stateSinkObject.Save(4, &c.RetransmitTimer)
+	stateSinkObject.Save(5, &c.LearnRetransmitTimer)
+	stateSinkObject.Save(6, &c.DelayFirstProbeTime)
+	stateSinkObject.Save(7, &c.MaxMulticastProbes)
+	stateSinkObject.Save(8, &c.MaxUnicastProbes)
+	stateSinkObject.Save(9, &c.MaxAnycastDelayTime)
+	stateSinkObject.Save(10, &c.MaxReachabilityConfirmations)
 }
 
-func (fl *IPHeaderFilter) afterLoad() {}
+func (c *NUDConfigurations) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (fl *IPHeaderFilter) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &fl.Protocol)
-	stateSourceObject.Load(1, &fl.CheckProtocol)
-	stateSourceObject.Load(2, &fl.Dst)
-	stateSourceObject.Load(3, &fl.DstMask)
-	stateSourceObject.Load(4, &fl.DstInvert)
-	stateSourceObject.Load(5, &fl.Src)
-	stateSourceObject.Load(6, &fl.SrcMask)
-	stateSourceObject.Load(7, &fl.SrcInvert)
-	stateSourceObject.Load(8, &fl.InputInterface)
-	stateSourceObject.Load(9, &fl.InputInterfaceMask)
-	stateSourceObject.Load(10, &fl.InputInterfaceInvert)
-	stateSourceObject.Load(11, &fl.OutputInterface)
-	stateSourceObject.Load(12, &fl.OutputInterfaceMask)
-	stateSourceObject.Load(13, &fl.OutputInterfaceInvert)
+func (c *NUDConfigurations) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &c.BaseReachableTime)
+	stateSourceObject.Load(1, &c.LearnBaseReachableTime)
+	stateSourceObject.Load(2, &c.MinRandomFactor)
+	stateSourceObject.Load(3, &c.MaxRandomFactor)
+	stateSourceObject.Load(4, &c.RetransmitTimer)
+	stateSourceObject.Load(5, &c.LearnRetransmitTimer)
+	stateSourceObject.Load(6, &c.DelayFirstProbeTime)
+	stateSourceObject.Load(7, &c.MaxMulticastProbes)
+	stateSourceObject.Load(8, &c.MaxUnicastProbes)
+	stateSourceObject.Load(9, &c.MaxAnycastDelayTime)
+	stateSourceObject.Load(10, &c.MaxReachabilityConfirmations)
 }
 
-func (l *neighborEntryList) StateTypeName() string {
-	return "pkg/tcpip/stack.neighborEntryList"
+func (n *nudStateMu) StateTypeName() string {
+	return "pkg/tcpip/stack.nudStateMu"
 }
 
-func (l *neighborEntryList) StateFields() []string {
+func (n *nudStateMu) StateFields() []string {
 	return []string{
-		"head",
-		"tail",
+		"config",
+		"reachableTime",
+		"expiration",
+		"prevBaseReachableTime",
+		"prevMinRandomFactor",
+		"prevMaxRandomFactor",
 	}
 }
 
-func (l *neighborEntryList) beforeSave() {}
+func (n *nudStateMu) beforeSave() {}
 
 // +checklocksignore
-func (l *neighborEntryList) StateSave(stateSinkObject state.Sink) {
-	l.beforeSave()
-	stateSinkObject.Save(0, &l.head)
-	stateSinkObject.Save(1, &l.tail)
+func (n *nudStateMu) StateSave(stateSinkObject state.Sink) {
+	n.beforeSave()
+	stateSinkObject.Save(0, &n.config)
+	stateSinkObject.Save(1, &n.reachableTime)
+	stateSinkObject.Save(2, &n.expiration)
+	stateSinkObject.Save(3, &n.prevBaseReachableTime)
+	stateSinkObject.Save(4, &n.prevMinRandomFactor)
+	stateSinkObject.Save(5, &n.prevMaxRandomFactor)
 }
 
-func (l *neighborEntryList) afterLoad() {}
+func (n *nudStateMu) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *neighborEntryList) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &l.head)
-	stateSourceObject.Load(1, &l.tail)
+func (n *nudStateMu) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &n.config)
+	stateSourceObject.Load(1, &n.reachableTime)
+	stateSourceObject.Load(2, &n.expiration)
+	stateSourceObject.Load(3, &n.prevBaseReachableTime)
+	stateSourceObject.Load(4, &n.prevMinRandomFactor)
+	stateSourceObject.Load(5, &n.prevMaxRandomFactor)
 }
 
-func (e *neighborEntryEntry) StateTypeName() string {
-	return "pkg/tcpip/stack.neighborEntryEntry"
+func (s *NUDState) StateTypeName() string {
+	return "pkg/tcpip/stack.NUDState"
 }
 
-func (e *neighborEntryEntry) StateFields() []string {
+func (s *NUDState) StateFields() []string {
 	return []string{
-		"next",
-		"prev",
+		"clock",
+		"mu",
 	}
 }
 
-func (e *neighborEntryEntry) beforeSave() {}
+func (s *NUDState) beforeSave() {}
 
 // +checklocksignore
-func (e *neighborEntryEntry) StateSave(stateSinkObject state.Sink) {
-	e.beforeSave()
-	stateSinkObject.Save(0, &e.next)
-	stateSinkObject.Save(1, &e.prev)
+func (s *NUDState) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.clock)
+	stateSinkObject.Save(1, &s.mu)
 }
 
-func (e *neighborEntryEntry) afterLoad() {}
+func (s *NUDState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *neighborEntryEntry) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &e.next)
-	stateSourceObject.Load(1, &e.prev)
+func (s *NUDState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.clock)
+	stateSourceObject.Load(1, &s.mu)
 }
 
-func (p *PacketBuffer) StateTypeName() string {
+func (pk *PacketBuffer) StateTypeName() string {
 	return "pkg/tcpip/stack.PacketBuffer"
 }
 
-func (p *PacketBuffer) StateFields() []string {
+func (pk *PacketBuffer) StateFields() []string {
 	return []string{
 		"packetBufferRefs",
 		"buf",
@@ -515,55 +1396,55 @@ func (p *PacketBuffer) StateFields() []string {
 	}
 }
 
-func (p *PacketBuffer) beforeSave() {}
-
-// +checklocksignore
-func (p *PacketBuffer) StateSave(stateSinkObject state.Sink) {
-	p.beforeSave()
-	stateSinkObject.Save(0, &p.packetBufferRefs)
-	stateSinkObject.Save(1, &p.buf)
-	stateSinkObject.Save(2, &p.reserved)
-	stateSinkObject.Save(3, &p.pushed)
-	stateSinkObject.Save(4, &p.consumed)
-	stateSinkObject.Save(5, &p.headers)
-	stateSinkObject.Save(6, &p.NetworkProtocolNumber)
-	stateSinkObject.Save(7, &p.TransportProtocolNumber)
-	stateSinkObject.Save(8, &p.Hash)
-	stateSinkObject.Save(9, &p.Owner)
-	stateSinkObject.Save(10, &p.EgressRoute)
-	stateSinkObject.Save(11, &p.GSOOptions)
-	stateSinkObject.Save(12, &p.snatDone)
-	stateSinkObject.Save(13, &p.dnatDone)
-	stateSinkObject.Save(14, &p.PktType)
-	stateSinkObject.Save(15, &p.NICID)
-	stateSinkObject.Save(16, &p.RXChecksumValidated)
-	stateSinkObject.Save(17, &p.NetworkPacketInfo)
-	stateSinkObject.Save(18, &p.tuple)
-}
-
-func (p *PacketBuffer) afterLoad() {}
-
-// +checklocksignore
-func (p *PacketBuffer) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &p.packetBufferRefs)
-	stateSourceObject.Load(1, &p.buf)
-	stateSourceObject.Load(2, &p.reserved)
-	stateSourceObject.Load(3, &p.pushed)
-	stateSourceObject.Load(4, &p.consumed)
-	stateSourceObject.Load(5, &p.headers)
-	stateSourceObject.Load(6, &p.NetworkProtocolNumber)
-	stateSourceObject.Load(7, &p.TransportProtocolNumber)
-	stateSourceObject.Load(8, &p.Hash)
-	stateSourceObject.Load(9, &p.Owner)
-	stateSourceObject.Load(10, &p.EgressRoute)
-	stateSourceObject.Load(11, &p.GSOOptions)
-	stateSourceObject.Load(12, &p.snatDone)
-	stateSourceObject.Load(13, &p.dnatDone)
-	stateSourceObject.Load(14, &p.PktType)
-	stateSourceObject.Load(15, &p.NICID)
-	stateSourceObject.Load(16, &p.RXChecksumValidated)
-	stateSourceObject.Load(17, &p.NetworkPacketInfo)
-	stateSourceObject.Load(18, &p.tuple)
+func (pk *PacketBuffer) beforeSave() {}
+
+// +checklocksignore
+func (pk *PacketBuffer) StateSave(stateSinkObject state.Sink) {
+	pk.beforeSave()
+	stateSinkObject.Save(0, &pk.packetBufferRefs)
+	stateSinkObject.Save(1, &pk.buf)
+	stateSinkObject.Save(2, &pk.reserved)
+	stateSinkObject.Save(3, &pk.pushed)
+	stateSinkObject.Save(4, &pk.consumed)
+	stateSinkObject.Save(5, &pk.headers)
+	stateSinkObject.Save(6, &pk.NetworkProtocolNumber)
+	stateSinkObject.Save(7, &pk.TransportProtocolNumber)
+	stateSinkObject.Save(8, &pk.Hash)
+	stateSinkObject.Save(9, &pk.Owner)
+	stateSinkObject.Save(10, &pk.EgressRoute)
+	stateSinkObject.Save(11, &pk.GSOOptions)
+	stateSinkObject.Save(12, &pk.snatDone)
+	stateSinkObject.Save(13, &pk.dnatDone)
+	stateSinkObject.Save(14, &pk.PktType)
+	stateSinkObject.Save(15, &pk.NICID)
+	stateSinkObject.Save(16, &pk.RXChecksumValidated)
+	stateSinkObject.Save(17, &pk.NetworkPacketInfo)
+	stateSinkObject.Save(18, &pk.tuple)
+}
+
+func (pk *PacketBuffer) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (pk *PacketBuffer) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &pk.packetBufferRefs)
+	stateSourceObject.Load(1, &pk.buf)
+	stateSourceObject.Load(2, &pk.reserved)
+	stateSourceObject.Load(3, &pk.pushed)
+	stateSourceObject.Load(4, &pk.consumed)
+	stateSourceObject.Load(5, &pk.headers)
+	stateSourceObject.Load(6, &pk.NetworkProtocolNumber)
+	stateSourceObject.Load(7, &pk.TransportProtocolNumber)
+	stateSourceObject.Load(8, &pk.Hash)
+	stateSourceObject.Load(9, &pk.Owner)
+	stateSourceObject.Load(10, &pk.EgressRoute)
+	stateSourceObject.Load(11, &pk.GSOOptions)
+	stateSourceObject.Load(12, &pk.snatDone)
+	stateSourceObject.Load(13, &pk.dnatDone)
+	stateSourceObject.Load(14, &pk.PktType)
+	stateSourceObject.Load(15, &pk.NICID)
+	stateSourceObject.Load(16, &pk.RXChecksumValidated)
+	stateSourceObject.Load(17, &pk.NetworkPacketInfo)
+	stateSourceObject.Load(18, &pk.tuple)
 }
 
 func (h *headerInfo) StateTypeName() string {
@@ -586,10 +1467,10 @@ func (h *headerInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &h.length)
 }
 
-func (h *headerInfo) afterLoad() {}
+func (h *headerInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (h *headerInfo) StateLoad(stateSourceObject state.Source) {
+func (h *headerInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &h.offset)
 	stateSourceObject.Load(1, &h.length)
 }
@@ -612,10 +1493,10 @@ func (d *PacketData) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &d.pk)
 }
 
-func (d *PacketData) afterLoad() {}
+func (d *PacketData) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (d *PacketData) StateLoad(stateSourceObject state.Source) {
+func (d *PacketData) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &d.pk)
 }
 
@@ -637,10 +1518,10 @@ func (pl *PacketBufferList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &pl.pbs)
 }
 
-func (pl *PacketBufferList) afterLoad() {}
+func (pl *PacketBufferList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (pl *PacketBufferList) StateLoad(stateSourceObject state.Source) {
+func (pl *PacketBufferList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &pl.pbs)
 }
 
@@ -663,9 +1544,93 @@ func (r *packetBufferRefs) StateSave(stateSinkObject state.Sink) {
 }
 
 // +checklocksignore
-func (r *packetBufferRefs) StateLoad(stateSourceObject state.Source) {
+func (r *packetBufferRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.refCount)
-	stateSourceObject.AfterLoad(r.afterLoad)
+	stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) })
+}
+
+func (p *pendingPacket) StateTypeName() string {
+	return "pkg/tcpip/stack.pendingPacket"
+}
+
+func (p *pendingPacket) StateFields() []string {
+	return []string{
+		"routeInfo",
+		"pkt",
+	}
+}
+
+func (p *pendingPacket) beforeSave() {}
+
+// +checklocksignore
+func (p *pendingPacket) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.routeInfo)
+	stateSinkObject.Save(1, &p.pkt)
+}
+
+func (p *pendingPacket) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *pendingPacket) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.routeInfo)
+	stateSourceObject.Load(1, &p.pkt)
+}
+
+func (p *packetsPendingLinkResolutionMu) StateTypeName() string {
+	return "pkg/tcpip/stack.packetsPendingLinkResolutionMu"
+}
+
+func (p *packetsPendingLinkResolutionMu) StateFields() []string {
+	return []string{
+		"packets",
+		"cancelChans",
+	}
+}
+
+func (p *packetsPendingLinkResolutionMu) beforeSave() {}
+
+// +checklocksignore
+func (p *packetsPendingLinkResolutionMu) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.packets)
+	stateSinkObject.Save(1, &p.cancelChans)
+}
+
+func (p *packetsPendingLinkResolutionMu) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *packetsPendingLinkResolutionMu) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.packets)
+	stateSourceObject.Load(1, &p.cancelChans)
+}
+
+func (f *packetsPendingLinkResolution) StateTypeName() string {
+	return "pkg/tcpip/stack.packetsPendingLinkResolution"
+}
+
+func (f *packetsPendingLinkResolution) StateFields() []string {
+	return []string{
+		"nic",
+		"mu",
+	}
+}
+
+func (f *packetsPendingLinkResolution) beforeSave() {}
+
+// +checklocksignore
+func (f *packetsPendingLinkResolution) StateSave(stateSinkObject state.Sink) {
+	f.beforeSave()
+	stateSinkObject.Save(0, &f.nic)
+	stateSinkObject.Save(1, &f.mu)
+}
+
+func (f *packetsPendingLinkResolution) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (f *packetsPendingLinkResolution) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &f.nic)
+	stateSourceObject.Load(1, &f.mu)
 }
 
 func (t *TransportEndpointID) StateTypeName() string {
@@ -692,10 +1657,10 @@ func (t *TransportEndpointID) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(3, &t.RemoteAddress)
 }
 
-func (t *TransportEndpointID) afterLoad() {}
+func (t *TransportEndpointID) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TransportEndpointID) StateLoad(stateSourceObject state.Source) {
+func (t *TransportEndpointID) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.LocalPort)
 	stateSourceObject.Load(1, &t.LocalAddress)
 	stateSourceObject.Load(2, &t.RemotePort)
@@ -722,14 +1687,101 @@ func (n *NetworkPacketInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &n.IsForwardedPacket)
 }
 
-func (n *NetworkPacketInfo) afterLoad() {}
+func (n *NetworkPacketInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (n *NetworkPacketInfo) StateLoad(stateSourceObject state.Source) {
+func (n *NetworkPacketInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &n.LocalAddressBroadcast)
 	stateSourceObject.Load(1, &n.IsForwardedPacket)
 }
 
+func (lifetimes *AddressLifetimes) StateTypeName() string {
+	return "pkg/tcpip/stack.AddressLifetimes"
+}
+
+func (lifetimes *AddressLifetimes) StateFields() []string {
+	return []string{
+		"Deprecated",
+		"PreferredUntil",
+		"ValidUntil",
+	}
+}
+
+func (lifetimes *AddressLifetimes) beforeSave() {}
+
+// +checklocksignore
+func (lifetimes *AddressLifetimes) StateSave(stateSinkObject state.Sink) {
+	lifetimes.beforeSave()
+	stateSinkObject.Save(0, &lifetimes.Deprecated)
+	stateSinkObject.Save(1, &lifetimes.PreferredUntil)
+	stateSinkObject.Save(2, &lifetimes.ValidUntil)
+}
+
+func (lifetimes *AddressLifetimes) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (lifetimes *AddressLifetimes) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &lifetimes.Deprecated)
+	stateSourceObject.Load(1, &lifetimes.PreferredUntil)
+	stateSourceObject.Load(2, &lifetimes.ValidUntil)
+}
+
+func (u *UnicastSourceAndMulticastDestination) StateTypeName() string {
+	return "pkg/tcpip/stack.UnicastSourceAndMulticastDestination"
+}
+
+func (u *UnicastSourceAndMulticastDestination) StateFields() []string {
+	return []string{
+		"Source",
+		"Destination",
+	}
+}
+
+func (u *UnicastSourceAndMulticastDestination) beforeSave() {}
+
+// +checklocksignore
+func (u *UnicastSourceAndMulticastDestination) StateSave(stateSinkObject state.Sink) {
+	u.beforeSave()
+	stateSinkObject.Save(0, &u.Source)
+	stateSinkObject.Save(1, &u.Destination)
+}
+
+func (u *UnicastSourceAndMulticastDestination) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (u *UnicastSourceAndMulticastDestination) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &u.Source)
+	stateSourceObject.Load(1, &u.Destination)
+}
+
+func (c *DADConfigurations) StateTypeName() string {
+	return "pkg/tcpip/stack.DADConfigurations"
+}
+
+func (c *DADConfigurations) StateFields() []string {
+	return []string{
+		"DupAddrDetectTransmits",
+		"RetransmitTimer",
+	}
+}
+
+func (c *DADConfigurations) beforeSave() {}
+
+// +checklocksignore
+func (c *DADConfigurations) StateSave(stateSinkObject state.Sink) {
+	c.beforeSave()
+	stateSinkObject.Save(0, &c.DupAddrDetectTransmits)
+	stateSinkObject.Save(1, &c.RetransmitTimer)
+}
+
+func (c *DADConfigurations) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (c *DADConfigurations) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &c.DupAddrDetectTransmits)
+	stateSourceObject.Load(1, &c.RetransmitTimer)
+}
+
 func (g *GSOType) StateTypeName() string {
 	return "pkg/tcpip/stack.GSOType"
 }
@@ -766,10 +1818,10 @@ func (g *GSO) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(5, &g.MaxSize)
 }
 
-func (g *GSO) afterLoad() {}
+func (g *GSO) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (g *GSO) StateLoad(stateSourceObject state.Source) {
+func (g *GSO) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &g.Type)
 	stateSourceObject.Load(1, &g.NeedsCsum)
 	stateSourceObject.Load(2, &g.CsumOffset)
@@ -806,10 +1858,10 @@ func (r *routeInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(5, &r.Loop)
 }
 
-func (r *routeInfo) afterLoad() {}
+func (r *routeInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *routeInfo) StateLoad(stateSourceObject state.Source) {
+func (r *routeInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.RemoteAddress)
 	stateSourceObject.Load(1, &r.LocalAddress)
 	stateSourceObject.Load(2, &r.LocalLinkAddress)
@@ -838,14 +1890,133 @@ func (r *RouteInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &r.RemoteLinkAddress)
 }
 
-func (r *RouteInfo) afterLoad() {}
+func (r *RouteInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *RouteInfo) StateLoad(stateSourceObject state.Source) {
+func (r *RouteInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.routeInfo)
 	stateSourceObject.Load(1, &r.RemoteLinkAddress)
 }
 
+func (t *transportProtocolState) StateTypeName() string {
+	return "pkg/tcpip/stack.transportProtocolState"
+}
+
+func (t *transportProtocolState) StateFields() []string {
+	return []string{
+		"proto",
+	}
+}
+
+func (t *transportProtocolState) beforeSave() {}
+
+// +checklocksignore
+func (t *transportProtocolState) StateSave(stateSinkObject state.Sink) {
+	t.beforeSave()
+	stateSinkObject.Save(0, &t.proto)
+}
+
+func (t *transportProtocolState) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (t *transportProtocolState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &t.proto)
+}
+
+func (s *Stack) StateTypeName() string {
+	return "pkg/tcpip/stack.Stack"
+}
+
+func (s *Stack) StateFields() []string {
+	return []string{
+		"transportProtocols",
+		"networkProtocols",
+		"rawFactory",
+		"packetEndpointWriteSupported",
+		"demux",
+		"stats",
+		"routeTable",
+		"nics",
+		"defaultForwardingEnabled",
+		"nicIDGen",
+		"cleanupEndpoints",
+		"PortManager",
+		"clock",
+		"handleLocal",
+		"restoredEndpoints",
+		"resumableEndpoints",
+		"icmpRateLimiter",
+		"seed",
+		"nudConfigs",
+		"nudDisp",
+		"sendBufferSize",
+		"receiveBufferSize",
+		"tcpInvalidRateLimit",
+		"tsOffsetSecret",
+	}
+}
+
+func (s *Stack) beforeSave() {}
+
+// +checklocksignore
+func (s *Stack) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.transportProtocols)
+	stateSinkObject.Save(1, &s.networkProtocols)
+	stateSinkObject.Save(2, &s.rawFactory)
+	stateSinkObject.Save(3, &s.packetEndpointWriteSupported)
+	stateSinkObject.Save(4, &s.demux)
+	stateSinkObject.Save(5, &s.stats)
+	stateSinkObject.Save(6, &s.routeTable)
+	stateSinkObject.Save(7, &s.nics)
+	stateSinkObject.Save(8, &s.defaultForwardingEnabled)
+	stateSinkObject.Save(9, &s.nicIDGen)
+	stateSinkObject.Save(10, &s.cleanupEndpoints)
+	stateSinkObject.Save(11, &s.PortManager)
+	stateSinkObject.Save(12, &s.clock)
+	stateSinkObject.Save(13, &s.handleLocal)
+	stateSinkObject.Save(14, &s.restoredEndpoints)
+	stateSinkObject.Save(15, &s.resumableEndpoints)
+	stateSinkObject.Save(16, &s.icmpRateLimiter)
+	stateSinkObject.Save(17, &s.seed)
+	stateSinkObject.Save(18, &s.nudConfigs)
+	stateSinkObject.Save(19, &s.nudDisp)
+	stateSinkObject.Save(20, &s.sendBufferSize)
+	stateSinkObject.Save(21, &s.receiveBufferSize)
+	stateSinkObject.Save(22, &s.tcpInvalidRateLimit)
+	stateSinkObject.Save(23, &s.tsOffsetSecret)
+}
+
+func (s *Stack) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *Stack) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.transportProtocols)
+	stateSourceObject.Load(1, &s.networkProtocols)
+	stateSourceObject.Load(2, &s.rawFactory)
+	stateSourceObject.Load(3, &s.packetEndpointWriteSupported)
+	stateSourceObject.Load(4, &s.demux)
+	stateSourceObject.Load(5, &s.stats)
+	stateSourceObject.Load(6, &s.routeTable)
+	stateSourceObject.Load(7, &s.nics)
+	stateSourceObject.Load(8, &s.defaultForwardingEnabled)
+	stateSourceObject.Load(9, &s.nicIDGen)
+	stateSourceObject.Load(10, &s.cleanupEndpoints)
+	stateSourceObject.Load(11, &s.PortManager)
+	stateSourceObject.Load(12, &s.clock)
+	stateSourceObject.Load(13, &s.handleLocal)
+	stateSourceObject.Load(14, &s.restoredEndpoints)
+	stateSourceObject.Load(15, &s.resumableEndpoints)
+	stateSourceObject.Load(16, &s.icmpRateLimiter)
+	stateSourceObject.Load(17, &s.seed)
+	stateSourceObject.Load(18, &s.nudConfigs)
+	stateSourceObject.Load(19, &s.nudDisp)
+	stateSourceObject.Load(20, &s.sendBufferSize)
+	stateSourceObject.Load(21, &s.receiveBufferSize)
+	stateSourceObject.Load(22, &s.tcpInvalidRateLimit)
+	stateSourceObject.Load(23, &s.tsOffsetSecret)
+}
+
 func (t *TransportEndpointInfo) StateTypeName() string {
 	return "pkg/tcpip/stack.TransportEndpointInfo"
 }
@@ -874,10 +2045,10 @@ func (t *TransportEndpointInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(5, &t.RegisterNICID)
 }
 
-func (t *TransportEndpointInfo) afterLoad() {}
+func (t *TransportEndpointInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TransportEndpointInfo) StateLoad(stateSourceObject state.Source) {
+func (t *TransportEndpointInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.NetProto)
 	stateSourceObject.Load(1, &t.TransProto)
 	stateSourceObject.Load(2, &t.ID)
@@ -901,6 +2072,12 @@ func (t *TCPCubicState) StateFields() []string {
 		"Beta",
 		"WC",
 		"WEst",
+		"EndSeq",
+		"CurrRTT",
+		"LastRTT",
+		"SampleCount",
+		"LastAck",
+		"RoundStart",
 	}
 }
 
@@ -918,12 +2095,18 @@ func (t *TCPCubicState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(6, &t.Beta)
 	stateSinkObject.Save(7, &t.WC)
 	stateSinkObject.Save(8, &t.WEst)
+	stateSinkObject.Save(9, &t.EndSeq)
+	stateSinkObject.Save(10, &t.CurrRTT)
+	stateSinkObject.Save(11, &t.LastRTT)
+	stateSinkObject.Save(12, &t.SampleCount)
+	stateSinkObject.Save(13, &t.LastAck)
+	stateSinkObject.Save(14, &t.RoundStart)
 }
 
-func (t *TCPCubicState) afterLoad() {}
+func (t *TCPCubicState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPCubicState) StateLoad(stateSourceObject state.Source) {
+func (t *TCPCubicState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.WLastMax)
 	stateSourceObject.Load(1, &t.WMax)
 	stateSourceObject.Load(2, &t.T)
@@ -933,6 +2116,12 @@ func (t *TCPCubicState) StateLoad(stateSourceObject state.Source) {
 	stateSourceObject.Load(6, &t.Beta)
 	stateSourceObject.Load(7, &t.WC)
 	stateSourceObject.Load(8, &t.WEst)
+	stateSourceObject.Load(9, &t.EndSeq)
+	stateSourceObject.Load(10, &t.CurrRTT)
+	stateSourceObject.Load(11, &t.LastRTT)
+	stateSourceObject.Load(12, &t.SampleCount)
+	stateSourceObject.Load(13, &t.LastAck)
+	stateSourceObject.Load(14, &t.RoundStart)
 }
 
 func (t *TCPRACKState) StateTypeName() string {
@@ -971,10 +2160,10 @@ func (t *TCPRACKState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(9, &t.RTTSeq)
 }
 
-func (t *TCPRACKState) afterLoad() {}
+func (t *TCPRACKState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPRACKState) StateLoad(stateSourceObject state.Source) {
+func (t *TCPRACKState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.XmitTime)
 	stateSourceObject.Load(1, &t.EndSequence)
 	stateSourceObject.Load(2, &t.FACK)
@@ -1011,10 +2200,10 @@ func (t *TCPEndpointID) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(3, &t.RemoteAddress)
 }
 
-func (t *TCPEndpointID) afterLoad() {}
+func (t *TCPEndpointID) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPEndpointID) StateLoad(stateSourceObject state.Source) {
+func (t *TCPEndpointID) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.LocalPort)
 	stateSourceObject.Load(1, &t.LocalAddress)
 	stateSourceObject.Load(2, &t.RemotePort)
@@ -1049,10 +2238,10 @@ func (t *TCPFastRecoveryState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(5, &t.RescueRxt)
 }
 
-func (t *TCPFastRecoveryState) afterLoad() {}
+func (t *TCPFastRecoveryState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPFastRecoveryState) StateLoad(stateSourceObject state.Source) {
+func (t *TCPFastRecoveryState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.Active)
 	stateSourceObject.Load(1, &t.First)
 	stateSourceObject.Load(2, &t.Last)
@@ -1085,10 +2274,10 @@ func (t *TCPReceiverState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(3, &t.PendingBufUsed)
 }
 
-func (t *TCPReceiverState) afterLoad() {}
+func (t *TCPReceiverState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPReceiverState) StateLoad(stateSourceObject state.Source) {
+func (t *TCPReceiverState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.RcvNxt)
 	stateSourceObject.Load(1, &t.RcvAcc)
 	stateSourceObject.Load(2, &t.RcvWndScale)
@@ -1117,10 +2306,10 @@ func (t *TCPRTTState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &t.SRTTInited)
 }
 
-func (t *TCPRTTState) afterLoad() {}
+func (t *TCPRTTState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPRTTState) StateLoad(stateSourceObject state.Source) {
+func (t *TCPRTTState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.SRTT)
 	stateSourceObject.Load(1, &t.RTTVar)
 	stateSourceObject.Load(2, &t.SRTTInited)
@@ -1188,10 +2377,10 @@ func (t *TCPSenderState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(22, &t.SpuriousRecovery)
 }
 
-func (t *TCPSenderState) afterLoad() {}
+func (t *TCPSenderState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPSenderState) StateLoad(stateSourceObject state.Source) {
+func (t *TCPSenderState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.LastSendTime)
 	stateSourceObject.Load(1, &t.DupAckCount)
 	stateSourceObject.Load(2, &t.SndCwnd)
@@ -1239,10 +2428,10 @@ func (t *TCPSACKInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &t.MaxSACKED)
 }
 
-func (t *TCPSACKInfo) afterLoad() {}
+func (t *TCPSACKInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPSACKInfo) StateLoad(stateSourceObject state.Source) {
+func (t *TCPSACKInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.Blocks)
 	stateSourceObject.Load(1, &t.ReceivedBlocks)
 	stateSourceObject.Load(2, &t.MaxSACKED)
@@ -1282,10 +2471,10 @@ func (r *RcvBufAutoTuneParams) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(8, &r.Disabled)
 }
 
-func (r *RcvBufAutoTuneParams) afterLoad() {}
+func (r *RcvBufAutoTuneParams) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *RcvBufAutoTuneParams) StateLoad(stateSourceObject state.Source) {
+func (r *RcvBufAutoTuneParams) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.MeasureTime)
 	stateSourceObject.Load(1, &r.CopiedBytes)
 	stateSourceObject.Load(2, &r.PrevCopiedBytes)
@@ -1319,10 +2508,10 @@ func (t *TCPRcvBufState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &t.RcvClosed)
 }
 
-func (t *TCPRcvBufState) afterLoad() {}
+func (t *TCPRcvBufState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPRcvBufState) StateLoad(stateSourceObject state.Source) {
+func (t *TCPRcvBufState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.RcvBufUsed)
 	stateSourceObject.Load(1, &t.RcvAutoParams)
 	stateSourceObject.Load(2, &t.RcvClosed)
@@ -1356,10 +2545,10 @@ func (t *TCPSndBufState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(5, &t.AutoTuneSndBufDisabled)
 }
 
-func (t *TCPSndBufState) afterLoad() {}
+func (t *TCPSndBufState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPSndBufState) StateLoad(stateSourceObject state.Source) {
+func (t *TCPSndBufState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.SndBufSize)
 	stateSourceObject.Load(1, &t.SndBufUsed)
 	stateSourceObject.Load(2, &t.SndClosed)
@@ -1392,10 +2581,10 @@ func (t *TCPEndpointStateInner) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(3, &t.RecentTS)
 }
 
-func (t *TCPEndpointStateInner) afterLoad() {}
+func (t *TCPEndpointStateInner) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPEndpointStateInner) StateLoad(stateSourceObject state.Source) {
+func (t *TCPEndpointStateInner) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.TSOffset)
 	stateSourceObject.Load(1, &t.SACKPermitted)
 	stateSourceObject.Load(2, &t.SendTSOk)
@@ -1434,10 +2623,10 @@ func (t *TCPEndpointState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(7, &t.Sender)
 }
 
-func (t *TCPEndpointState) afterLoad() {}
+func (t *TCPEndpointState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (t *TCPEndpointState) StateLoad(stateSourceObject state.Source) {
+func (t *TCPEndpointState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &t.TCPEndpointStateInner)
 	stateSourceObject.Load(1, &t.ID)
 	stateSourceObject.Load(2, &t.SegTime)
@@ -1448,6 +2637,121 @@ func (t *TCPEndpointState) StateLoad(stateSourceObject state.Source) {
 	stateSourceObject.Load(7, &t.Sender)
 }
 
+func (p *protocolIDs) StateTypeName() string {
+	return "pkg/tcpip/stack.protocolIDs"
+}
+
+func (p *protocolIDs) StateFields() []string {
+	return []string{
+		"network",
+		"transport",
+	}
+}
+
+func (p *protocolIDs) beforeSave() {}
+
+// +checklocksignore
+func (p *protocolIDs) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.network)
+	stateSinkObject.Save(1, &p.transport)
+}
+
+func (p *protocolIDs) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *protocolIDs) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.network)
+	stateSourceObject.Load(1, &p.transport)
+}
+
+func (eps *transportEndpoints) StateTypeName() string {
+	return "pkg/tcpip/stack.transportEndpoints"
+}
+
+func (eps *transportEndpoints) StateFields() []string {
+	return []string{
+		"endpoints",
+		"rawEndpoints",
+	}
+}
+
+func (eps *transportEndpoints) beforeSave() {}
+
+// +checklocksignore
+func (eps *transportEndpoints) StateSave(stateSinkObject state.Sink) {
+	eps.beforeSave()
+	stateSinkObject.Save(0, &eps.endpoints)
+	stateSinkObject.Save(1, &eps.rawEndpoints)
+}
+
+func (eps *transportEndpoints) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (eps *transportEndpoints) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &eps.endpoints)
+	stateSourceObject.Load(1, &eps.rawEndpoints)
+}
+
+func (epsByNIC *endpointsByNIC) StateTypeName() string {
+	return "pkg/tcpip/stack.endpointsByNIC"
+}
+
+func (epsByNIC *endpointsByNIC) StateFields() []string {
+	return []string{
+		"seed",
+		"endpoints",
+	}
+}
+
+func (epsByNIC *endpointsByNIC) beforeSave() {}
+
+// +checklocksignore
+func (epsByNIC *endpointsByNIC) StateSave(stateSinkObject state.Sink) {
+	epsByNIC.beforeSave()
+	stateSinkObject.Save(0, &epsByNIC.seed)
+	stateSinkObject.Save(1, &epsByNIC.endpoints)
+}
+
+func (epsByNIC *endpointsByNIC) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (epsByNIC *endpointsByNIC) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &epsByNIC.seed)
+	stateSourceObject.Load(1, &epsByNIC.endpoints)
+}
+
+func (d *transportDemuxer) StateTypeName() string {
+	return "pkg/tcpip/stack.transportDemuxer"
+}
+
+func (d *transportDemuxer) StateFields() []string {
+	return []string{
+		"stack",
+		"protocol",
+		"queuedProtocols",
+	}
+}
+
+func (d *transportDemuxer) beforeSave() {}
+
+// +checklocksignore
+func (d *transportDemuxer) StateSave(stateSinkObject state.Sink) {
+	d.beforeSave()
+	stateSinkObject.Save(0, &d.stack)
+	stateSinkObject.Save(1, &d.protocol)
+	stateSinkObject.Save(2, &d.queuedProtocols)
+}
+
+func (d *transportDemuxer) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (d *transportDemuxer) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &d.stack)
+	stateSourceObject.Load(1, &d.protocol)
+	stateSourceObject.Load(2, &d.queuedProtocols)
+}
+
 func (ep *multiPortEndpoint) StateTypeName() string {
 	return "pkg/tcpip/stack.multiPortEndpoint"
 }
@@ -1474,10 +2778,10 @@ func (ep *multiPortEndpoint) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(4, &ep.endpoints)
 }
 
-func (ep *multiPortEndpoint) afterLoad() {}
+func (ep *multiPortEndpoint) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (ep *multiPortEndpoint) StateLoad(stateSourceObject state.Source) {
+func (ep *multiPortEndpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &ep.demux)
 	stateSourceObject.Load(1, &ep.netProto)
 	stateSourceObject.Load(2, &ep.transProto)
@@ -1505,10 +2809,10 @@ func (l *tupleList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *tupleList) afterLoad() {}
+func (l *tupleList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *tupleList) StateLoad(stateSourceObject state.Source) {
+func (l *tupleList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
@@ -1533,40 +2837,75 @@ func (e *tupleEntry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *tupleEntry) afterLoad() {}
+func (e *tupleEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *tupleEntry) StateLoad(stateSourceObject state.Source) {
+func (e *tupleEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
 
 func init() {
 	state.Register((*addressStateRefs)(nil))
+	state.Register((*AddressableEndpointState)(nil))
+	state.Register((*AddressableEndpointStateOptions)(nil))
+	state.Register((*addressState)(nil))
 	state.Register((*tuple)(nil))
 	state.Register((*tupleID)(nil))
 	state.Register((*conn)(nil))
 	state.Register((*ConnTrack)(nil))
 	state.Register((*bucket)(nil))
-	state.Register((*groPacketList)(nil))
-	state.Register((*groPacketEntry)(nil))
+	state.Register((*ICMPRateLimiter)(nil))
+	state.Register((*AcceptTarget)(nil))
+	state.Register((*DropTarget)(nil))
+	state.Register((*RejectIPv4Target)(nil))
+	state.Register((*RejectIPv6Target)(nil))
+	state.Register((*ErrorTarget)(nil))
+	state.Register((*UserChainTarget)(nil))
+	state.Register((*ReturnTarget)(nil))
+	state.Register((*DNATTarget)(nil))
+	state.Register((*RedirectTarget)(nil))
+	state.Register((*SNATTarget)(nil))
+	state.Register((*MasqueradeTarget)(nil))
 	state.Register((*IPTables)(nil))
 	state.Register((*Table)(nil))
 	state.Register((*Rule)(nil))
 	state.Register((*IPHeaderFilter)(nil))
+	state.Register((*dynamicCacheEntry)(nil))
+	state.Register((*neighborCacheMu)(nil))
+	state.Register((*neighborCache)(nil))
 	state.Register((*neighborEntryList)(nil))
 	state.Register((*neighborEntryEntry)(nil))
+	state.Register((*linkResolver)(nil))
+	state.Register((*nic)(nil))
+	state.Register((*packetEndpointList)(nil))
+	state.Register((*delegatingQueueingDiscipline)(nil))
+	state.Register((*sharedStats)(nil))
+	state.Register((*multiCounterNICPacketStats)(nil))
+	state.Register((*multiCounterNICNeighborStats)(nil))
+	state.Register((*multiCounterNICStats)(nil))
+	state.Register((*NUDConfigurations)(nil))
+	state.Register((*nudStateMu)(nil))
+	state.Register((*NUDState)(nil))
 	state.Register((*PacketBuffer)(nil))
 	state.Register((*headerInfo)(nil))
 	state.Register((*PacketData)(nil))
 	state.Register((*PacketBufferList)(nil))
 	state.Register((*packetBufferRefs)(nil))
+	state.Register((*pendingPacket)(nil))
+	state.Register((*packetsPendingLinkResolutionMu)(nil))
+	state.Register((*packetsPendingLinkResolution)(nil))
 	state.Register((*TransportEndpointID)(nil))
 	state.Register((*NetworkPacketInfo)(nil))
+	state.Register((*AddressLifetimes)(nil))
+	state.Register((*UnicastSourceAndMulticastDestination)(nil))
+	state.Register((*DADConfigurations)(nil))
 	state.Register((*GSOType)(nil))
 	state.Register((*GSO)(nil))
 	state.Register((*routeInfo)(nil))
 	state.Register((*RouteInfo)(nil))
+	state.Register((*transportProtocolState)(nil))
+	state.Register((*Stack)(nil))
 	state.Register((*TransportEndpointInfo)(nil))
 	state.Register((*TCPCubicState)(nil))
 	state.Register((*TCPRACKState)(nil))
@@ -1581,6 +2920,10 @@ func init() {
 	state.Register((*TCPSndBufState)(nil))
 	state.Register((*TCPEndpointStateInner)(nil))
 	state.Register((*TCPEndpointState)(nil))
+	state.Register((*protocolIDs)(nil))
+	state.Register((*transportEndpoints)(nil))
+	state.Register((*endpointsByNIC)(nil))
+	state.Register((*transportDemuxer)(nil))
 	state.Register((*multiPortEndpoint)(nil))
 	state.Register((*tupleList)(nil))
 	state.Register((*tupleEntry)(nil))
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/state_conn_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/state_conn_mutex.go
index f1593040..6f9075b5 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/state_conn_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/state_conn_mutex.go
@@ -17,7 +17,7 @@ type stateConnRWMutex struct {
 var stateConnlockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type stateConnlockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/tcp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/tcp.go
index 44b866db..f5273405 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/tcp.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/tcp.go
@@ -15,6 +15,7 @@
 package stack
 
 import (
+	"context"
 	"time"
 
 	"gvisor.dev/gvisor/pkg/atomicbitops"
@@ -24,6 +25,19 @@ import (
 	"gvisor.dev/gvisor/pkg/tcpip/seqnum"
 )
 
+// contextID is this package's type for context.Context.Value keys.
+type contextID int
+
+const (
+	// CtxRestoreStack is a Context.Value key for the stack to be used in restore.
+	CtxRestoreStack contextID = iota
+)
+
+// RestoreStackFromContext returns the stack to be used during restore.
+func RestoreStackFromContext(ctx context.Context) *Stack {
+	return ctx.Value(CtxRestoreStack).(*Stack)
+}
+
 // TCPProbeFunc is the expected function type for a TCP probe function to be
 // passed to stack.AddTCPProbe.
 type TCPProbeFunc func(s *TCPEndpointState)
@@ -71,6 +85,26 @@ type TCPCubicState struct {
 	// WEst is the window computed by CUBIC at time
 	// TimeSinceLastCongestion+RTT i.e WC(TimeSinceLastCongestion+RTT).
 	WEst float64
+
+	// EndSeq is the sequence number that, when cumulatively ACK'd, ends the
+	// HyStart round.
+	EndSeq seqnum.Value
+
+	// CurrRTT is the minimum round-trip time from the current round.
+	CurrRTT time.Duration
+
+	// LastRTT is the minimum round-trip time from the previous round.
+	LastRTT time.Duration
+
+	// SampleCount is the number of samples from the current round.
+	SampleCount uint
+
+	// LastAck is the time we received the most recent ACK (or start of round if
+	// more recent).
+	LastAck tcpip.MonotonicTime
+
+	// RoundStart is the time we started the most recent HyStart round.
+	RoundStart tcpip.MonotonicTime
 }
 
 // TCPRACKState is used to hold a copy of the internal RACK state when the
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go
index 6d38b637..98e5b1df 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go
@@ -23,6 +23,7 @@ import (
 	"gvisor.dev/gvisor/pkg/tcpip/ports"
 )
 
+// +stateify savable
 type protocolIDs struct {
 	network   tcpip.NetworkProtocolNumber
 	transport tcpip.TransportProtocolNumber
@@ -30,8 +31,10 @@ type protocolIDs struct {
 
 // transportEndpoints manages all endpoints of a given protocol. It has its own
 // mutex so as to reduce interference between protocols.
+//
+// +stateify savable
 type transportEndpoints struct {
-	mu transportEndpointsRWMutex
+	mu transportEndpointsRWMutex `state:"nosave"`
 	// +checklocks:mu
 	endpoints map[TransportEndpointID]*endpointsByNIC
 	// rawEndpoints contains endpoints for raw sockets, which receive all
@@ -133,11 +136,12 @@ func (eps *transportEndpoints) findEndpointLocked(id TransportEndpointID) *endpo
 	return matchedEP
 }
 
+// +stateify savable
 type endpointsByNIC struct {
 	// seed is a random secret for a jenkins hash.
 	seed uint32
 
-	mu endpointsByNICRWMutex
+	mu endpointsByNICRWMutex `state:"nosave"`
 	// +checklocks:mu
 	endpoints map[tcpip.NICID]*multiPortEndpoint
 }
@@ -155,7 +159,7 @@ func (epsByNIC *endpointsByNIC) transportEndpoints() []TransportEndpoint {
 // handlePacket is called by the stack when new packets arrive to this transport
 // endpoint. It returns false if the packet could not be matched to any
 // transport endpoint, true otherwise.
-func (epsByNIC *endpointsByNIC) handlePacket(id TransportEndpointID, pkt PacketBufferPtr) bool {
+func (epsByNIC *endpointsByNIC) handlePacket(id TransportEndpointID, pkt *PacketBuffer) bool {
 	epsByNIC.mu.RLock()
 
 	mpep, ok := epsByNIC.endpoints[pkt.NICID]
@@ -187,7 +191,7 @@ func (epsByNIC *endpointsByNIC) handlePacket(id TransportEndpointID, pkt PacketB
 }
 
 // handleError delivers an error to the transport endpoint identified by id.
-func (epsByNIC *endpointsByNIC) handleError(n *nic, id TransportEndpointID, transErr TransportError, pkt PacketBufferPtr) {
+func (epsByNIC *endpointsByNIC) handleError(n *nic, id TransportEndpointID, transErr TransportError, pkt *PacketBuffer) {
 	epsByNIC.mu.RLock()
 
 	mpep, ok := epsByNIC.endpoints[n.ID()]
@@ -266,6 +270,8 @@ func (epsByNIC *endpointsByNIC) unregisterEndpoint(bindToDevice tcpip.NICID, t T
 // of demultiplexing: first based on the network and transport protocols, then
 // based on endpoints IDs. It should only be instantiated via
 // newTransportDemuxer.
+//
+// +stateify savable
 type transportDemuxer struct {
 	stack *Stack
 
@@ -278,7 +284,7 @@ type transportDemuxer struct {
 // the dispatcher to delivery packets to the QueuePacket method instead of
 // calling HandlePacket directly on the endpoint.
 type queuedTransportProtocol interface {
-	QueuePacket(ep TransportEndpoint, id TransportEndpointID, pkt PacketBufferPtr)
+	QueuePacket(ep TransportEndpoint, id TransportEndpointID, pkt *PacketBuffer)
 }
 
 func newTransportDemuxer(stack *Stack) *transportDemuxer {
@@ -400,7 +406,7 @@ func (ep *multiPortEndpoint) selectEndpoint(id TransportEndpointID, seed uint32)
 	return ep.endpoints[idx]
 }
 
-func (ep *multiPortEndpoint) handlePacketAll(id TransportEndpointID, pkt PacketBufferPtr) {
+func (ep *multiPortEndpoint) handlePacketAll(id TransportEndpointID, pkt *PacketBuffer) {
 	ep.mu.RLock()
 	queuedProtocol, mustQueue := ep.demux.queuedProtocols[protocolIDs{ep.netProto, ep.transProto}]
 	// HandlePacket may modify pkt, so each endpoint needs
@@ -546,7 +552,7 @@ func (d *transportDemuxer) unregisterEndpoint(netProtos []tcpip.NetworkProtocolN
 // deliverPacket attempts to find one or more matching transport endpoints, and
 // then, if matches are found, delivers the packet to them. Returns true if
 // the packet no longer needs to be handled.
-func (d *transportDemuxer) deliverPacket(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr, id TransportEndpointID) bool {
+func (d *transportDemuxer) deliverPacket(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer, id TransportEndpointID) bool {
 	eps, ok := d.protocol[protocolIDs{pkt.NetworkProtocolNumber, protocol}]
 	if !ok {
 		return false
@@ -599,7 +605,7 @@ func (d *transportDemuxer) deliverPacket(protocol tcpip.TransportProtocolNumber,
 
 // deliverRawPacket attempts to deliver the given packet and returns whether it
 // was delivered successfully.
-func (d *transportDemuxer) deliverRawPacket(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr) bool {
+func (d *transportDemuxer) deliverRawPacket(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer) bool {
 	eps, ok := d.protocol[protocolIDs{pkt.NetworkProtocolNumber, protocol}]
 	if !ok {
 		return false
@@ -633,7 +639,7 @@ func (d *transportDemuxer) deliverRawPacket(protocol tcpip.TransportProtocolNumb
 // endpoint.
 //
 // Returns true if the error was delivered.
-func (d *transportDemuxer) deliverError(n *nic, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, transErr TransportError, pkt PacketBufferPtr, id TransportEndpointID) bool {
+func (d *transportDemuxer) deliverError(n *nic, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, transErr TransportError, pkt *PacketBuffer, id TransportEndpointID) bool {
 	eps, ok := d.protocol[protocolIDs{net, trans}]
 	if !ok {
 		return false
@@ -718,7 +724,7 @@ func (d *transportDemuxer) unregisterRawEndpoint(netProto tcpip.NetworkProtocolN
 	eps.mu.Unlock()
 }
 
-func isInboundMulticastOrBroadcast(pkt PacketBufferPtr, localAddr tcpip.Address) bool {
+func isInboundMulticastOrBroadcast(pkt *PacketBuffer, localAddr tcpip.Address) bool {
 	return pkt.NetworkPacketInfo.LocalAddressBroadcast || header.IsV4MulticastAddress(localAddr) || header.IsV6MulticastAddress(localAddr)
 }
 
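Note for downstream code built against this vendored stack: the mechanical part of these hunks is that the old PacketBufferPtr alias is gone and every handler takes *stack.PacketBuffer directly. A minimal, hypothetical sketch of the updated callback shape (the package and function name below are illustrative only, not part of this patch):

	package consumer // hypothetical downstream package, not in this patch

	import "gvisor.dev/gvisor/pkg/tcpip/stack"

	// handlePacket used to accept stack.PacketBufferPtr; with this gVisor
	// version the alias is removed, so it takes *stack.PacketBuffer.
	func handlePacket(pkt *stack.PacketBuffer) {
		defer pkt.DecRef() // packet buffers are reference counted
		_ = pkt.Size()
	}
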
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_endpoints_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_endpoints_mutex.go
index 2098d77e..cb6f13d7 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_endpoints_mutex.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_endpoints_mutex.go
@@ -17,7 +17,7 @@ type transportEndpointsRWMutex struct {
 var transportEndpointslockNames []string
 
 // lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
-// refering to an index within lockNames.
+// referring to an index within lockNames.
 // Values are specified using the "consts" field of go_template_instance.
 type transportEndpointslockNameIndex int
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock.go
index cc3397ca..e80e7c4b 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock.go
@@ -91,6 +91,7 @@ func (*stdClock) AfterFunc(d time.Duration, f func()) Timer {
 	}
 }
 
+// +stateify savable
 type stdTimer struct {
 	t *time.Timer
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go
index 25be1755..530b46ec 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go
@@ -14,7 +14,10 @@
 
 package tcpip
 
-import "time"
+import (
+	"context"
+	"time"
+)
 
 // beforeSave is invoked by stateify.
 func (s *stdClock) beforeSave() {
@@ -22,6 +25,6 @@ func (s *stdClock) beforeSave() {
 }
 
 // afterLoad is invoked by stateify.
-func (s *stdClock) afterLoad() {
+func (s *stdClock) afterLoad(context.Context) {
 	s.baseTime = time.Now()
 }
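The stateify hook signatures change throughout this bump: afterLoad and StateLoad now receive a context.Context, and AfterLoad callbacks are registered as closures capturing that context (as the stdClock hunk above and the autogenerated files below show). A minimal sketch of the new hook shape for a hypothetical savable type, assuming only what these hunks show:

	package example // hypothetical type, for illustration only

	import (
		"context"
		"time"

		"gvisor.dev/gvisor/pkg/state"
	)

	type clockLike struct {
		base time.Time `state:"nosave"`
	}

	// afterLoad now takes a context.Context (previously it took none).
	func (c *clockLike) afterLoad(context.Context) { c.base = time.Now() }

	// StateLoad likewise gains the context parameter and forwards it to
	// the deferred afterLoad callback via a closure.
	func (c *clockLike) StateLoad(ctx context.Context, src state.Source) {
		src.AfterLoad(func() { c.afterLoad(ctx) })
	}
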
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go
index 4c0845ad..b8948173 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go
@@ -35,6 +35,8 @@ import (
 	"io"
 	"math"
 	"math/bits"
+	"math/rand"
+	"net"
 	"reflect"
 	"strconv"
 	"strings"
@@ -53,6 +55,17 @@ const (
 	ipv6ProtocolNumber = 0x86dd
 )
 
+const (
+	// LinkAddressSize is the size of a MAC address.
+	LinkAddressSize = 6
+)
+
+// Known IP address.
+var (
+	IPv4Zero = []byte{0, 0, 0, 0}
+	IPv6Zero = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+)
+
 // Errors related to Subnet
 var (
 	errSubnetLengthMismatch = errors.New("subnet length of address and mask differ")
@@ -112,6 +125,11 @@ func (mt MonotonicTime) Sub(u MonotonicTime) time.Duration {
 	return time.Unix(0, mt.nanoseconds).Sub(time.Unix(0, u.nanoseconds))
 }
 
+// Milliseconds returns the time in milliseconds.
+func (mt MonotonicTime) Milliseconds() int64 {
+	return mt.nanoseconds / 1e6
+}
+
 // A Clock provides the current time and schedules work for execution.
 //
 // Times returned by a Clock should always be used for application-visible
@@ -316,17 +334,24 @@ func (a Address) MatchingPrefix(b Address) uint8 {
 //
 // +stateify savable
 type AddressMask struct {
-	mask string
+	mask   [16]byte
+	length int
 }
 
 // MaskFrom returns a Mask based on str.
+//
+// MaskFrom may allocate, and so should not be in hot paths.
 func MaskFrom(str string) AddressMask {
-	return AddressMask{mask: str}
+	mask := AddressMask{length: len(str)}
+	copy(mask.mask[:], str)
+	return mask
 }
 
 // MaskFromBytes returns a Mask based on bs.
 func MaskFromBytes(bs []byte) AddressMask {
-	return AddressMask{mask: string(bs)}
+	mask := AddressMask{length: len(bs)}
+	copy(mask.mask[:], bs)
+	return mask
 }
 
 // String implements Stringer.
@@ -337,23 +362,23 @@ func (m AddressMask) String() string {
 // AsSlice returns a as a byte slice. Callers should be careful as it can
 // return a window into existing memory.
 func (m *AddressMask) AsSlice() []byte {
-	return []byte(m.mask)
+	return []byte(m.mask[:m.length])
 }
 
 // BitLen returns the length of the mask in bits.
 func (m AddressMask) BitLen() int {
-	return len(m.mask) * 8
+	return m.length * 8
 }
 
 // Len returns the length of the mask in bytes.
 func (m AddressMask) Len() int {
-	return len(m.mask)
+	return m.length
 }
 
 // Prefix returns the number of bits before the first host bit.
 func (m AddressMask) Prefix() int {
 	p := 0
-	for _, b := range []byte(m.mask) {
+	for _, b := range m.mask[:m.length] {
 		p += bits.LeadingZeros8(^b)
 	}
 	return p
@@ -366,6 +391,8 @@ func (m AddressMask) Equal(other AddressMask) bool {
 }
 
 // Subnet is a subnet defined by its address and mask.
+//
+// +stateify savable
 type Subnet struct {
 	address Address
 	mask    AddressMask
@@ -890,7 +917,7 @@ type WriteOptions struct {
 
 	// Atomic means that all data fetched from Payloader must be written to the
 	// endpoint. If Atomic is false, then data fetched from the Payloader may be
-	// discarded if available endpoint buffer space is unsufficient.
+	// discarded if available endpoint buffer space is insufficient.
 	Atomic bool
 
 	// ControlMessages contains optional overrides used when writing a packet.
@@ -983,10 +1010,13 @@ const (
 	UseDefaultIPv6HopLimit = -1
 )
 
+// PMTUDStrategy is the kind of PMTUD to perform.
+type PMTUDStrategy int
+
 const (
 	// PMTUDiscoveryWant is a setting of the MTUDiscoverOption to use
 	// per-route settings.
-	PMTUDiscoveryWant int = iota
+	PMTUDiscoveryWant PMTUDStrategy = iota
 
 	// PMTUDiscoveryDont is a setting of the MTUDiscoverOption to disable
 	// path MTU discovery.
@@ -1079,6 +1109,8 @@ func (*TCPDelayEnabled) isGettableTransportProtocolOption() {}
 func (*TCPDelayEnabled) isSettableTransportProtocolOption() {}
 
 // TCPSendBufferSizeRangeOption is the send buffer size range for TCP.
+//
+// +stateify savable
 type TCPSendBufferSizeRangeOption struct {
 	Min     int
 	Default int
@@ -1090,6 +1122,8 @@ func (*TCPSendBufferSizeRangeOption) isGettableTransportProtocolOption() {}
 func (*TCPSendBufferSizeRangeOption) isSettableTransportProtocolOption() {}
 
 // TCPReceiveBufferSizeRangeOption is the receive buffer size range for TCP.
+//
+// +stateify savable
 type TCPReceiveBufferSizeRangeOption struct {
 	Min     int
 	Default int
@@ -1128,7 +1162,7 @@ type SettableSocketOption interface {
 	isSettableSocketOption()
 }
 
-// ICMPv6Filter specifes a filter for ICMPv6 types.
+// ICMPv6Filter specifies a filter for ICMPv6 types.
 //
 // +stateify savable
 type ICMPv6Filter struct {
@@ -1178,8 +1212,6 @@ const (
 )
 
 // TCPInfoOption is used by GetSockOpt to expose TCP statistics.
-//
-// TODO(b/64800844): Add and populate stat fields.
 type TCPInfoOption struct {
 	// RTT is the smoothed round trip time.
 	RTT time.Duration
@@ -1386,16 +1418,16 @@ func (*TCPTimeWaitReuseOption) isGettableTransportProtocolOption() {}
 func (*TCPTimeWaitReuseOption) isSettableTransportProtocolOption() {}
 
 const (
-	// TCPTimeWaitReuseDisabled indicates reuse of port bound by endponts in TIME-WAIT cannot
+	// TCPTimeWaitReuseDisabled indicates reuse of port bound by endpoints in TIME-WAIT cannot
 	// be reused for new connections.
 	TCPTimeWaitReuseDisabled TCPTimeWaitReuseOption = iota
 
-	// TCPTimeWaitReuseGlobal indicates reuse of port bound by endponts in TIME-WAIT can
+	// TCPTimeWaitReuseGlobal indicates reuse of port bound by endpoints in TIME-WAIT can
 	// be reused for new connections irrespective of the src/dest addresses.
 	TCPTimeWaitReuseGlobal
 
 	// TCPTimeWaitReuseLoopbackOnly indicates reuse of port bound by endpoint in TIME-WAIT can
-	// only be reused if the connection was a connection over loopback. i.e src/dest adddresses
+	// only be reused if the connection was a connection over loopback. i.e. src/dest addresses
 	// are loopback addresses.
 	TCPTimeWaitReuseLoopbackOnly
 )
@@ -1434,6 +1466,8 @@ type IPv6PacketInfo struct {
 
 // SendBufferSizeOption is used by stack.(Stack*).Option/SetOption to
 // get/set the default, min and max send buffer sizes.
+//
+// +stateify savable
 type SendBufferSizeOption struct {
 	// Min is the minimum size for send buffer.
 	Min int
@@ -1447,6 +1481,8 @@ type SendBufferSizeOption struct {
 
 // ReceiveBufferSizeOption is used by stack.(Stack*).Option/SetOption to
 // get/set the default, min and max receive buffer sizes.
+//
+// +stateify savable
 type ReceiveBufferSizeOption struct {
 	// Min is the minimum size for send buffer.
 	Min int
@@ -1485,7 +1521,11 @@ func GetStackReceiveBufferLimits(so StackHandler) ReceiveBufferSizeOption {
 // Route is a row in the routing table. It specifies through which NIC (and
 // gateway) sets of packets should be routed. A row is considered viable if the
 // masked target address matches the destination address in the row.
+//
+// +stateify savable
 type Route struct {
+	RouteEntry
+
 	// Destination must contain the target address for this row to be viable.
 	Destination Subnet
 
@@ -1494,6 +1534,15 @@ type Route struct {
 
 	// NIC is the id of the nic to be used if this row is viable.
 	NIC NICID
+
+	// SourceHint indicates a preferred source address to use when NICs
+	// have multiple addresses.
+	SourceHint Address
+
+	// MTU is the maximum transmission unit to use for this route.
+	// If MTU is 0, this field is ignored and the MTU of the NIC for which this route
+	// is configured is used for egress packets.
+	MTU uint32
 }
 
 // String implements the fmt.Stringer interface.
@@ -1510,7 +1559,7 @@ func (r Route) String() string {
 // Equal returns true if the given Route is equal to this Route.
 func (r Route) Equal(to Route) bool {
 	// NOTE: This relies on the fact that r.Destination == to.Destination
-	return r.Destination.Equal(to.Destination) && r.Gateway == to.Gateway && r.NIC == to.NIC
+	return r.Destination.Equal(to.Destination) && r.NIC == to.NIC
 }
 
 // TransportProtocolNumber is the number of a transport protocol.
@@ -1554,6 +1603,8 @@ func (s *StatCounter) String() string {
 }
 
 // A MultiCounterStat keeps track of two counters at once.
+//
+// +stateify savable
 type MultiCounterStat struct {
 	a *StatCounter
 	b *StatCounter
@@ -1578,6 +1629,8 @@ func (m *MultiCounterStat) IncrementBy(v uint64) {
 }
 
 // ICMPv4PacketStats enumerates counts for all ICMPv4 packet types.
+//
+// +stateify savable
 type ICMPv4PacketStats struct {
 	// LINT.IfChange(ICMPv4PacketStats)
 
@@ -1619,6 +1672,8 @@ type ICMPv4PacketStats struct {
 }
 
 // ICMPv4SentPacketStats collects outbound ICMPv4-specific stats.
+//
+// +stateify savable
 type ICMPv4SentPacketStats struct {
 	// LINT.IfChange(ICMPv4SentPacketStats)
 
@@ -1635,6 +1690,8 @@ type ICMPv4SentPacketStats struct {
 }
 
 // ICMPv4ReceivedPacketStats collects inbound ICMPv4-specific stats.
+//
+// +stateify savable
 type ICMPv4ReceivedPacketStats struct {
 	// LINT.IfChange(ICMPv4ReceivedPacketStats)
 
@@ -1647,6 +1704,8 @@ type ICMPv4ReceivedPacketStats struct {
 }
 
 // ICMPv4Stats collects ICMPv4-specific stats.
+//
+// +stateify savable
 type ICMPv4Stats struct {
 	// LINT.IfChange(ICMPv4Stats)
 
@@ -1660,6 +1719,8 @@ type ICMPv4Stats struct {
 }
 
 // ICMPv6PacketStats enumerates counts for all ICMPv6 packet types.
+//
+// +stateify savable
 type ICMPv6PacketStats struct {
 	// LINT.IfChange(ICMPv6PacketStats)
 
@@ -1717,6 +1778,8 @@ type ICMPv6PacketStats struct {
 }
 
 // ICMPv6SentPacketStats collects outbound ICMPv6-specific stats.
+//
+// +stateify savable
 type ICMPv6SentPacketStats struct {
 	// LINT.IfChange(ICMPv6SentPacketStats)
 
@@ -1733,6 +1796,8 @@ type ICMPv6SentPacketStats struct {
 }
 
 // ICMPv6ReceivedPacketStats collects inbound ICMPv6-specific stats.
+//
+// +stateify savable
 type ICMPv6ReceivedPacketStats struct {
 	// LINT.IfChange(ICMPv6ReceivedPacketStats)
 
@@ -1753,6 +1818,8 @@ type ICMPv6ReceivedPacketStats struct {
 }
 
 // ICMPv6Stats collects ICMPv6-specific stats.
+//
+// +stateify savable
 type ICMPv6Stats struct {
 	// LINT.IfChange(ICMPv6Stats)
 
@@ -1766,6 +1833,8 @@ type ICMPv6Stats struct {
 }
 
 // ICMPStats collects ICMP-specific stats (both v4 and v6).
+//
+// +stateify savable
 type ICMPStats struct {
 	// V4 contains the ICMPv4-specifics stats.
 	V4 ICMPv4Stats
@@ -1775,6 +1844,8 @@ type ICMPStats struct {
 }
 
 // IGMPPacketStats enumerates counts for all IGMP packet types.
+//
+// +stateify savable
 type IGMPPacketStats struct {
 	// LINT.IfChange(IGMPPacketStats)
 
@@ -1800,6 +1871,8 @@ type IGMPPacketStats struct {
 }
 
 // IGMPSentPacketStats collects outbound IGMP-specific stats.
+//
+// +stateify savable
 type IGMPSentPacketStats struct {
 	// LINT.IfChange(IGMPSentPacketStats)
 
@@ -1812,6 +1885,8 @@ type IGMPSentPacketStats struct {
 }
 
 // IGMPReceivedPacketStats collects inbound IGMP-specific stats.
+//
+// +stateify savable
 type IGMPReceivedPacketStats struct {
 	// LINT.IfChange(IGMPReceivedPacketStats)
 
@@ -1824,13 +1899,15 @@ type IGMPReceivedPacketStats struct {
 	ChecksumErrors *StatCounter
 
 	// Unrecognized is the number of unrecognized messages counted, these are
-	// silently ignored for forward-compatibilty.
+	// silently ignored for forward-compatibility.
 	Unrecognized *StatCounter
 
 	// LINT.ThenChange(network/ipv4/stats.go:multiCounterIGMPReceivedPacketStats)
 }
 
 // IGMPStats collects IGMP-specific stats.
+//
+// +stateify savable
 type IGMPStats struct {
 	// LINT.IfChange(IGMPStats)
 
@@ -1844,6 +1921,8 @@ type IGMPStats struct {
 }
 
 // IPForwardingStats collects stats related to IP forwarding (both v4 and v6).
+//
+// +stateify savable
 type IPForwardingStats struct {
 	// LINT.IfChange(IPForwardingStats)
 
@@ -1891,7 +1970,7 @@ type IPForwardingStats struct {
 	UnknownOutputEndpoint *StatCounter
 
 	// NoMulticastPendingQueueBufferSpace is the number of multicast packets that
-	// were dropped due to insufficent buffer space in the pending packet queue.
+	// were dropped due to insufficient buffer space in the pending packet queue.
 	NoMulticastPendingQueueBufferSpace *StatCounter
 
 	// OutgoingDeviceNoBufferSpace is the number of packets that were dropped due
@@ -1906,6 +1985,8 @@ type IPForwardingStats struct {
 }
 
 // IPStats collects IP-specific stats (both v4 and v6).
+//
+// +stateify savable
 type IPStats struct {
 	// LINT.IfChange(IPStats)
 
@@ -1988,6 +2069,8 @@ type IPStats struct {
 }
 
 // ARPStats collects ARP-specific stats.
+//
+// +stateify savable
 type ARPStats struct {
 	// LINT.IfChange(ARPStats)
 
@@ -2041,6 +2124,8 @@ type ARPStats struct {
 }
 
 // TCPStats collects TCP-specific stats.
+//
+// +stateify savable
 type TCPStats struct {
 	// ActiveConnectionOpenings is the number of connections opened
 	// successfully via Connect.
@@ -2165,6 +2250,8 @@ type TCPStats struct {
 }
 
 // UDPStats collects UDP-specific stats.
+//
+// +stateify savable
 type UDPStats struct {
 	// PacketsReceived is the number of UDP datagrams received via
 	// HandlePacket.
@@ -2193,6 +2280,8 @@ type UDPStats struct {
 }
 
 // NICNeighborStats holds metrics for the neighbor table.
+//
+// +stateify savable
 type NICNeighborStats struct {
 	// LINT.IfChange(NICNeighborStats)
 
@@ -2214,6 +2303,8 @@ type NICNeighborStats struct {
 }
 
 // NICPacketStats holds basic packet statistics.
+//
+// +stateify savable
 type NICPacketStats struct {
 	// LINT.IfChange(NICPacketStats)
 
@@ -2228,8 +2319,10 @@ type NICPacketStats struct {
 
 // IntegralStatCounterMap holds a map associating integral keys with
 // StatCounters.
+//
+// +stateify savable
 type IntegralStatCounterMap struct {
-	mu sync.RWMutex
+	mu sync.RWMutex `state:"nosave"`
 	// +checklocks:mu
 	counterMap map[uint64]*StatCounter
 }
@@ -2280,6 +2373,8 @@ func (m *IntegralStatCounterMap) Increment(key uint64) {
 
 // A MultiIntegralStatCounterMap keeps track of two integral counter maps at
 // once.
+//
+// +stateify savable
 type MultiIntegralStatCounterMap struct {
 	a *IntegralStatCounterMap
 	b *IntegralStatCounterMap
@@ -2299,14 +2394,16 @@ func (m *MultiIntegralStatCounterMap) Increment(key uint64) {
 }
 
 // NICStats holds NIC statistics.
+//
+// +stateify savable
 type NICStats struct {
 	// LINT.IfChange(NICStats)
 
-	// UnknownL3ProtocolRcvdPacketCounts records the number of packets recieved
-	// for each unknown or unsupported netowrk protocol number.
+	// UnknownL3ProtocolRcvdPacketCounts records the number of packets received
+	// for each unknown or unsupported network protocol number.
 	UnknownL3ProtocolRcvdPacketCounts *IntegralStatCounterMap
 
-	// UnknownL4ProtocolRcvdPacketCounts records the number of packets recieved
+	// UnknownL4ProtocolRcvdPacketCounts records the number of packets received
 	// for each unknown or unsupported transport protocol number.
 	UnknownL4ProtocolRcvdPacketCounts *IntegralStatCounterMap
 
@@ -2344,6 +2441,8 @@ func (s NICStats) FillIn() NICStats {
 }
 
 // Stats holds statistics about the networking stack.
+//
+// +stateify savable
 type Stats struct {
 	// TODO(https://gvisor.dev/issues/5986): Make the DroppedPackets stat less
 	// ambiguous.
@@ -2617,7 +2716,7 @@ func ParseMACAddress(s string) (LinkAddress, error) {
 	parts := strings.FieldsFunc(s, func(c rune) bool {
 		return c == ':' || c == '-'
 	})
-	if len(parts) != 6 {
+	if len(parts) != LinkAddressSize {
 		return "", fmt.Errorf("inconsistent parts: %s", s)
 	}
 	addr := make([]byte, 0, len(parts))
@@ -2631,6 +2730,15 @@ func ParseMACAddress(s string) (LinkAddress, error) {
 	return LinkAddress(addr), nil
 }
 
+// GetRandMacAddr returns a mac address that can be used for local virtual devices.
+func GetRandMacAddr() LinkAddress {
+	mac := make(net.HardwareAddr, LinkAddressSize)
+	rand.Read(mac) // Fill with random data.
+	mac[0] &^= 0x1 // Clear multicast bit.
+	mac[0] |= 0x2  // Set local assignment bit (IEEE802).
+	return LinkAddress(mac)
+}
+
 // AddressWithPrefix is an address with its subnet prefix length.
 //
 // +stateify savable
@@ -2652,36 +2760,40 @@ func (a AddressWithPrefix) Subnet() Subnet {
 	addrLen := a.Address.length
 	if a.PrefixLen <= 0 {
 		return Subnet{
-			address: AddrFromSlice(bytes.Repeat([]byte{0}, addrLen)),
-			mask:    MaskFromBytes(bytes.Repeat([]byte{0}, addrLen)),
+			address: Address{length: addrLen},
+			mask:    AddressMask{length: addrLen},
 		}
 	}
 	if a.PrefixLen >= addrLen*8 {
-		return Subnet{
+		sub := Subnet{
 			address: a.Address,
-			mask:    MaskFromBytes(bytes.Repeat([]byte{0xff}, addrLen)),
+			mask:    AddressMask{length: addrLen},
+		}
+		for i := 0; i < addrLen; i++ {
+			sub.mask.mask[i] = 0xff
 		}
+		return sub
 	}
 
-	sa := make([]byte, addrLen)
-	sm := make([]byte, addrLen)
+	sa := Address{length: addrLen}
+	sm := AddressMask{length: addrLen}
 	n := uint(a.PrefixLen)
 	for i := 0; i < addrLen; i++ {
 		if n >= 8 {
-			sa[i] = a.Address.addr[i]
-			sm[i] = 0xff
+			sa.addr[i] = a.Address.addr[i]
+			sm.mask[i] = 0xff
 			n -= 8
 			continue
 		}
-		sm[i] = ^byte(0xff >> n)
-		sa[i] = a.Address.addr[i] & sm[i]
+		sm.mask[i] = ^byte(0xff >> n)
+		sa.addr[i] = a.Address.addr[i] & sm.mask[i]
 		n = 0
 	}
 
 	// For extra caution, call NewSubnet rather than directly creating the Subnet
 	// value. If that fails it indicates a serious bug in this code, so panic is
 	// in order.
-	s, err := NewSubnet(AddrFromSlice(sa), MaskFromBytes(sm))
+	s, err := NewSubnet(sa, sm)
 	if err != nil {
 		panic("invalid subnet: " + err.Error())
 	}
@@ -2690,6 +2802,8 @@ func (a AddressWithPrefix) Subnet() Subnet {
 
 // ProtocolAddress is an address and the network protocol it is associated
 // with.
+//
+// +stateify savable
 type ProtocolAddress struct {
 	// Protocol is the protocol of the address.
 	Protocol NetworkProtocolNumber
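The tcpip.go hunks above add a handful of small public helpers alongside the AddressMask rework: a LinkAddressSize constant, GetRandMacAddr for locally-administered MACs, MonotonicTime.Milliseconds, and new Route fields (SourceHint, MTU, and an embedded RouteEntry). A hedged sketch of how a consumer might exercise them, using only identifiers visible in this diff; it is illustrative, not part of the patch:

	package example // illustrative only

	import (
		"fmt"

		"gvisor.dev/gvisor/pkg/tcpip"
	)

	func buildRoute() (tcpip.Route, error) {
		addr := tcpip.AddrFromSlice([]byte{192, 168, 1, 0})
		mask := tcpip.MaskFromBytes([]byte{255, 255, 255, 0})
		subnet, err := tcpip.NewSubnet(addr, mask)
		if err != nil {
			return tcpip.Route{}, err
		}
		return tcpip.Route{
			Destination: subnet,
			NIC:         1,
			// MTU of 0 falls back to the NIC's MTU; a non-zero value
			// caps egress packets on this route.
			MTU: 1400,
		}, nil
	}

	func printMac() {
		// Random MAC with the multicast bit cleared and the
		// locally-administered bit set, per the new helper.
		fmt.Println(tcpip.GetRandMacAddr())
	}
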
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_linux_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_linux_state_autogen.go
new file mode 100644
index 00000000..cbd75faa
--- /dev/null
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_linux_state_autogen.go
@@ -0,0 +1,6 @@
+// automatically generated by stateify.
+
+//go:build linux
+// +build linux
+
+package tcpip
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state.go
index 5181e355..0603ff04 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state.go
@@ -15,6 +15,7 @@
 package tcpip
 
 import (
+	"context"
 	"time"
 )
 
@@ -22,6 +23,6 @@ func (c *ReceivableControlMessages) saveTimestamp() int64 {
 	return c.Timestamp.UnixNano()
 }
 
-func (c *ReceivableControlMessages) loadTimestamp(nsec int64) {
+func (c *ReceivableControlMessages) loadTimestamp(_ context.Context, nsec int64) {
 	c.Timestamp = time.Unix(0, nsec)
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go
index 7fce7aeb..7a75e886 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go
@@ -3,6 +3,8 @@
 package tcpip
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -21,10 +23,10 @@ func (e *ErrAborted) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrAborted) afterLoad() {}
+func (e *ErrAborted) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrAborted) StateLoad(stateSourceObject state.Source) {
+func (e *ErrAborted) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrAddressFamilyNotSupported) StateTypeName() string {
@@ -42,10 +44,10 @@ func (e *ErrAddressFamilyNotSupported) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrAddressFamilyNotSupported) afterLoad() {}
+func (e *ErrAddressFamilyNotSupported) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrAddressFamilyNotSupported) StateLoad(stateSourceObject state.Source) {
+func (e *ErrAddressFamilyNotSupported) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrAlreadyBound) StateTypeName() string {
@@ -63,10 +65,10 @@ func (e *ErrAlreadyBound) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrAlreadyBound) afterLoad() {}
+func (e *ErrAlreadyBound) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrAlreadyBound) StateLoad(stateSourceObject state.Source) {
+func (e *ErrAlreadyBound) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrAlreadyConnected) StateTypeName() string {
@@ -84,10 +86,10 @@ func (e *ErrAlreadyConnected) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrAlreadyConnected) afterLoad() {}
+func (e *ErrAlreadyConnected) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrAlreadyConnected) StateLoad(stateSourceObject state.Source) {
+func (e *ErrAlreadyConnected) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrAlreadyConnecting) StateTypeName() string {
@@ -105,10 +107,10 @@ func (e *ErrAlreadyConnecting) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrAlreadyConnecting) afterLoad() {}
+func (e *ErrAlreadyConnecting) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrAlreadyConnecting) StateLoad(stateSourceObject state.Source) {
+func (e *ErrAlreadyConnecting) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrBadAddress) StateTypeName() string {
@@ -126,10 +128,10 @@ func (e *ErrBadAddress) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrBadAddress) afterLoad() {}
+func (e *ErrBadAddress) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrBadAddress) StateLoad(stateSourceObject state.Source) {
+func (e *ErrBadAddress) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrBadBuffer) StateTypeName() string {
@@ -147,10 +149,10 @@ func (e *ErrBadBuffer) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrBadBuffer) afterLoad() {}
+func (e *ErrBadBuffer) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrBadBuffer) StateLoad(stateSourceObject state.Source) {
+func (e *ErrBadBuffer) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrBadLocalAddress) StateTypeName() string {
@@ -168,10 +170,10 @@ func (e *ErrBadLocalAddress) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrBadLocalAddress) afterLoad() {}
+func (e *ErrBadLocalAddress) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrBadLocalAddress) StateLoad(stateSourceObject state.Source) {
+func (e *ErrBadLocalAddress) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrBroadcastDisabled) StateTypeName() string {
@@ -189,10 +191,10 @@ func (e *ErrBroadcastDisabled) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrBroadcastDisabled) afterLoad() {}
+func (e *ErrBroadcastDisabled) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrBroadcastDisabled) StateLoad(stateSourceObject state.Source) {
+func (e *ErrBroadcastDisabled) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrClosedForReceive) StateTypeName() string {
@@ -210,10 +212,10 @@ func (e *ErrClosedForReceive) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrClosedForReceive) afterLoad() {}
+func (e *ErrClosedForReceive) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrClosedForReceive) StateLoad(stateSourceObject state.Source) {
+func (e *ErrClosedForReceive) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrClosedForSend) StateTypeName() string {
@@ -231,10 +233,10 @@ func (e *ErrClosedForSend) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrClosedForSend) afterLoad() {}
+func (e *ErrClosedForSend) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrClosedForSend) StateLoad(stateSourceObject state.Source) {
+func (e *ErrClosedForSend) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrConnectStarted) StateTypeName() string {
@@ -252,10 +254,10 @@ func (e *ErrConnectStarted) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrConnectStarted) afterLoad() {}
+func (e *ErrConnectStarted) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrConnectStarted) StateLoad(stateSourceObject state.Source) {
+func (e *ErrConnectStarted) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrConnectionAborted) StateTypeName() string {
@@ -273,10 +275,10 @@ func (e *ErrConnectionAborted) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrConnectionAborted) afterLoad() {}
+func (e *ErrConnectionAborted) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrConnectionAborted) StateLoad(stateSourceObject state.Source) {
+func (e *ErrConnectionAborted) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrConnectionRefused) StateTypeName() string {
@@ -294,10 +296,10 @@ func (e *ErrConnectionRefused) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrConnectionRefused) afterLoad() {}
+func (e *ErrConnectionRefused) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrConnectionRefused) StateLoad(stateSourceObject state.Source) {
+func (e *ErrConnectionRefused) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrConnectionReset) StateTypeName() string {
@@ -315,10 +317,10 @@ func (e *ErrConnectionReset) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrConnectionReset) afterLoad() {}
+func (e *ErrConnectionReset) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrConnectionReset) StateLoad(stateSourceObject state.Source) {
+func (e *ErrConnectionReset) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrDestinationRequired) StateTypeName() string {
@@ -336,10 +338,10 @@ func (e *ErrDestinationRequired) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrDestinationRequired) afterLoad() {}
+func (e *ErrDestinationRequired) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrDestinationRequired) StateLoad(stateSourceObject state.Source) {
+func (e *ErrDestinationRequired) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrDuplicateAddress) StateTypeName() string {
@@ -357,10 +359,10 @@ func (e *ErrDuplicateAddress) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrDuplicateAddress) afterLoad() {}
+func (e *ErrDuplicateAddress) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrDuplicateAddress) StateLoad(stateSourceObject state.Source) {
+func (e *ErrDuplicateAddress) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrDuplicateNICID) StateTypeName() string {
@@ -378,10 +380,31 @@ func (e *ErrDuplicateNICID) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrDuplicateNICID) afterLoad() {}
+func (e *ErrDuplicateNICID) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (e *ErrDuplicateNICID) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+}
+
+func (e *ErrInvalidNICID) StateTypeName() string {
+	return "pkg/tcpip.ErrInvalidNICID"
+}
+
+func (e *ErrInvalidNICID) StateFields() []string {
+	return []string{}
+}
+
+func (e *ErrInvalidNICID) beforeSave() {}
+
+// +checklocksignore
+func (e *ErrInvalidNICID) StateSave(stateSinkObject state.Sink) {
+	e.beforeSave()
+}
+
+func (e *ErrInvalidNICID) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrDuplicateNICID) StateLoad(stateSourceObject state.Source) {
+func (e *ErrInvalidNICID) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrInvalidEndpointState) StateTypeName() string {
@@ -399,10 +422,10 @@ func (e *ErrInvalidEndpointState) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrInvalidEndpointState) afterLoad() {}
+func (e *ErrInvalidEndpointState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrInvalidEndpointState) StateLoad(stateSourceObject state.Source) {
+func (e *ErrInvalidEndpointState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrInvalidOptionValue) StateTypeName() string {
@@ -420,10 +443,10 @@ func (e *ErrInvalidOptionValue) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrInvalidOptionValue) afterLoad() {}
+func (e *ErrInvalidOptionValue) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrInvalidOptionValue) StateLoad(stateSourceObject state.Source) {
+func (e *ErrInvalidOptionValue) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrInvalidPortRange) StateTypeName() string {
@@ -441,10 +464,10 @@ func (e *ErrInvalidPortRange) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrInvalidPortRange) afterLoad() {}
+func (e *ErrInvalidPortRange) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrInvalidPortRange) StateLoad(stateSourceObject state.Source) {
+func (e *ErrInvalidPortRange) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrMalformedHeader) StateTypeName() string {
@@ -462,10 +485,10 @@ func (e *ErrMalformedHeader) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrMalformedHeader) afterLoad() {}
+func (e *ErrMalformedHeader) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrMalformedHeader) StateLoad(stateSourceObject state.Source) {
+func (e *ErrMalformedHeader) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrMessageTooLong) StateTypeName() string {
@@ -483,10 +506,10 @@ func (e *ErrMessageTooLong) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrMessageTooLong) afterLoad() {}
+func (e *ErrMessageTooLong) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrMessageTooLong) StateLoad(stateSourceObject state.Source) {
+func (e *ErrMessageTooLong) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrNetworkUnreachable) StateTypeName() string {
@@ -504,10 +527,10 @@ func (e *ErrNetworkUnreachable) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrNetworkUnreachable) afterLoad() {}
+func (e *ErrNetworkUnreachable) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrNetworkUnreachable) StateLoad(stateSourceObject state.Source) {
+func (e *ErrNetworkUnreachable) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrNoBufferSpace) StateTypeName() string {
@@ -525,10 +548,10 @@ func (e *ErrNoBufferSpace) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrNoBufferSpace) afterLoad() {}
+func (e *ErrNoBufferSpace) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrNoBufferSpace) StateLoad(stateSourceObject state.Source) {
+func (e *ErrNoBufferSpace) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrNoPortAvailable) StateTypeName() string {
@@ -546,10 +569,10 @@ func (e *ErrNoPortAvailable) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrNoPortAvailable) afterLoad() {}
+func (e *ErrNoPortAvailable) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrNoPortAvailable) StateLoad(stateSourceObject state.Source) {
+func (e *ErrNoPortAvailable) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrHostUnreachable) StateTypeName() string {
@@ -567,10 +590,10 @@ func (e *ErrHostUnreachable) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrHostUnreachable) afterLoad() {}
+func (e *ErrHostUnreachable) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrHostUnreachable) StateLoad(stateSourceObject state.Source) {
+func (e *ErrHostUnreachable) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrHostDown) StateTypeName() string {
@@ -588,10 +611,10 @@ func (e *ErrHostDown) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrHostDown) afterLoad() {}
+func (e *ErrHostDown) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrHostDown) StateLoad(stateSourceObject state.Source) {
+func (e *ErrHostDown) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrNoNet) StateTypeName() string {
@@ -609,10 +632,10 @@ func (e *ErrNoNet) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrNoNet) afterLoad() {}
+func (e *ErrNoNet) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrNoNet) StateLoad(stateSourceObject state.Source) {
+func (e *ErrNoNet) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrNoSuchFile) StateTypeName() string {
@@ -630,10 +653,10 @@ func (e *ErrNoSuchFile) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrNoSuchFile) afterLoad() {}
+func (e *ErrNoSuchFile) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrNoSuchFile) StateLoad(stateSourceObject state.Source) {
+func (e *ErrNoSuchFile) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrNotConnected) StateTypeName() string {
@@ -651,10 +674,10 @@ func (e *ErrNotConnected) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrNotConnected) afterLoad() {}
+func (e *ErrNotConnected) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrNotConnected) StateLoad(stateSourceObject state.Source) {
+func (e *ErrNotConnected) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrNotPermitted) StateTypeName() string {
@@ -672,10 +695,10 @@ func (e *ErrNotPermitted) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrNotPermitted) afterLoad() {}
+func (e *ErrNotPermitted) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrNotPermitted) StateLoad(stateSourceObject state.Source) {
+func (e *ErrNotPermitted) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrNotSupported) StateTypeName() string {
@@ -693,10 +716,10 @@ func (e *ErrNotSupported) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrNotSupported) afterLoad() {}
+func (e *ErrNotSupported) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrNotSupported) StateLoad(stateSourceObject state.Source) {
+func (e *ErrNotSupported) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrPortInUse) StateTypeName() string {
@@ -714,10 +737,10 @@ func (e *ErrPortInUse) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrPortInUse) afterLoad() {}
+func (e *ErrPortInUse) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrPortInUse) StateLoad(stateSourceObject state.Source) {
+func (e *ErrPortInUse) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrQueueSizeNotSupported) StateTypeName() string {
@@ -735,10 +758,10 @@ func (e *ErrQueueSizeNotSupported) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrQueueSizeNotSupported) afterLoad() {}
+func (e *ErrQueueSizeNotSupported) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrQueueSizeNotSupported) StateLoad(stateSourceObject state.Source) {
+func (e *ErrQueueSizeNotSupported) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrTimeout) StateTypeName() string {
@@ -756,10 +779,10 @@ func (e *ErrTimeout) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrTimeout) afterLoad() {}
+func (e *ErrTimeout) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrTimeout) StateLoad(stateSourceObject state.Source) {
+func (e *ErrTimeout) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrUnknownDevice) StateTypeName() string {
@@ -777,10 +800,10 @@ func (e *ErrUnknownDevice) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrUnknownDevice) afterLoad() {}
+func (e *ErrUnknownDevice) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrUnknownDevice) StateLoad(stateSourceObject state.Source) {
+func (e *ErrUnknownDevice) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrUnknownNICID) StateTypeName() string {
@@ -798,10 +821,10 @@ func (e *ErrUnknownNICID) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrUnknownNICID) afterLoad() {}
+func (e *ErrUnknownNICID) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrUnknownNICID) StateLoad(stateSourceObject state.Source) {
+func (e *ErrUnknownNICID) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrUnknownProtocol) StateTypeName() string {
@@ -819,10 +842,10 @@ func (e *ErrUnknownProtocol) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrUnknownProtocol) afterLoad() {}
+func (e *ErrUnknownProtocol) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrUnknownProtocol) StateLoad(stateSourceObject state.Source) {
+func (e *ErrUnknownProtocol) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrUnknownProtocolOption) StateTypeName() string {
@@ -840,10 +863,10 @@ func (e *ErrUnknownProtocolOption) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrUnknownProtocolOption) afterLoad() {}
+func (e *ErrUnknownProtocolOption) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrUnknownProtocolOption) StateLoad(stateSourceObject state.Source) {
+func (e *ErrUnknownProtocolOption) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrWouldBlock) StateTypeName() string {
@@ -861,10 +884,10 @@ func (e *ErrWouldBlock) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrWouldBlock) afterLoad() {}
+func (e *ErrWouldBlock) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrWouldBlock) StateLoad(stateSourceObject state.Source) {
+func (e *ErrWouldBlock) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrMissingRequiredFields) StateTypeName() string {
@@ -882,10 +905,10 @@ func (e *ErrMissingRequiredFields) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 }
 
-func (e *ErrMissingRequiredFields) afterLoad() {}
+func (e *ErrMissingRequiredFields) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrMissingRequiredFields) StateLoad(stateSourceObject state.Source) {
+func (e *ErrMissingRequiredFields) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (e *ErrMulticastInputCannotBeOutput) StateTypeName() string {
@@ -903,10 +926,66 @@ func (e *ErrMulticastInputCannotBeOutput) StateSave(stateSinkObject state.Sink)
 	e.beforeSave()
 }
 
-func (e *ErrMulticastInputCannotBeOutput) afterLoad() {}
+func (e *ErrMulticastInputCannotBeOutput) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (e *ErrMulticastInputCannotBeOutput) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+}
+
+func (l *RouteList) StateTypeName() string {
+	return "pkg/tcpip.RouteList"
+}
+
+func (l *RouteList) StateFields() []string {
+	return []string{
+		"head",
+		"tail",
+	}
+}
+
+func (l *RouteList) beforeSave() {}
+
+// +checklocksignore
+func (l *RouteList) StateSave(stateSinkObject state.Sink) {
+	l.beforeSave()
+	stateSinkObject.Save(0, &l.head)
+	stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *RouteList) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (l *RouteList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &l.head)
+	stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *RouteEntry) StateTypeName() string {
+	return "pkg/tcpip.RouteEntry"
+}
+
+func (e *RouteEntry) StateFields() []string {
+	return []string{
+		"next",
+		"prev",
+	}
+}
+
+func (e *RouteEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *RouteEntry) StateSave(stateSinkObject state.Sink) {
+	e.beforeSave()
+	stateSinkObject.Save(0, &e.next)
+	stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *RouteEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *ErrMulticastInputCannotBeOutput) StateLoad(stateSourceObject state.Source) {
+func (e *RouteEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &e.next)
+	stateSourceObject.Load(1, &e.prev)
 }
 
 func (l *sockErrorList) StateTypeName() string {
@@ -929,10 +1008,10 @@ func (l *sockErrorList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *sockErrorList) afterLoad() {}
+func (l *sockErrorList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *sockErrorList) StateLoad(stateSourceObject state.Source) {
+func (l *sockErrorList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
@@ -957,10 +1036,10 @@ func (e *sockErrorEntry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *sockErrorEntry) afterLoad() {}
+func (e *sockErrorEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *sockErrorEntry) StateLoad(stateSourceObject state.Source) {
+func (e *sockErrorEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
@@ -1037,10 +1116,10 @@ func (so *SocketOptions) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(27, &so.rcvlowat)
 }
 
-func (so *SocketOptions) afterLoad() {}
+func (so *SocketOptions) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (so *SocketOptions) StateLoad(stateSourceObject state.Source) {
+func (so *SocketOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &so.handler)
 	stateSourceObject.Load(1, &so.broadcastEnabled)
 	stateSourceObject.Load(2, &so.passCredEnabled)
@@ -1089,10 +1168,10 @@ func (l *LocalSockError) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &l.info)
 }
 
-func (l *LocalSockError) afterLoad() {}
+func (l *LocalSockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *LocalSockError) StateLoad(stateSourceObject state.Source) {
+func (l *LocalSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.info)
 }
 
@@ -1126,10 +1205,10 @@ func (s *SockError) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(6, &s.NetProto)
 }
 
-func (s *SockError) afterLoad() {}
+func (s *SockError) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *SockError) StateLoad(stateSourceObject state.Source) {
+func (s *SockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &s.sockErrorEntry)
 	stateSourceObject.Load(1, &s.Err)
 	stateSourceObject.Load(2, &s.Cause)
@@ -1156,9 +1235,34 @@ func (s *stdClock) StateSave(stateSinkObject state.Sink) {
 }
 
 // +checklocksignore
-func (s *stdClock) StateLoad(stateSourceObject state.Source) {
+func (s *stdClock) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &s.monotonicOffset)
-	stateSourceObject.AfterLoad(s.afterLoad)
+	stateSourceObject.AfterLoad(func() { s.afterLoad(ctx) })
+}
+
+func (st *stdTimer) StateTypeName() string {
+	return "pkg/tcpip.stdTimer"
+}
+
+func (st *stdTimer) StateFields() []string {
+	return []string{
+		"t",
+	}
+}
+
+func (st *stdTimer) beforeSave() {}
+
+// +checklocksignore
+func (st *stdTimer) StateSave(stateSinkObject state.Sink) {
+	st.beforeSave()
+	stateSinkObject.Save(0, &st.t)
+}
+
+func (st *stdTimer) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (st *stdTimer) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &st.t)
 }
 
 func (mt *MonotonicTime) StateTypeName() string {
@@ -1179,10 +1283,10 @@ func (mt *MonotonicTime) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &mt.nanoseconds)
 }
 
-func (mt *MonotonicTime) afterLoad() {}
+func (mt *MonotonicTime) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (mt *MonotonicTime) StateLoad(stateSourceObject state.Source) {
+func (mt *MonotonicTime) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &mt.nanoseconds)
 }
 
@@ -1206,10 +1310,10 @@ func (a *Address) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &a.length)
 }
 
-func (a *Address) afterLoad() {}
+func (a *Address) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (a *Address) StateLoad(stateSourceObject state.Source) {
+func (a *Address) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &a.addr)
 	stateSourceObject.Load(1, &a.length)
 }
@@ -1221,6 +1325,7 @@ func (m *AddressMask) StateTypeName() string {
 func (m *AddressMask) StateFields() []string {
 	return []string{
 		"mask",
+		"length",
 	}
 }
 
@@ -1230,13 +1335,43 @@ func (m *AddressMask) beforeSave() {}
 func (m *AddressMask) StateSave(stateSinkObject state.Sink) {
 	m.beforeSave()
 	stateSinkObject.Save(0, &m.mask)
+	stateSinkObject.Save(1, &m.length)
 }
 
-func (m *AddressMask) afterLoad() {}
+func (m *AddressMask) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (m *AddressMask) StateLoad(stateSourceObject state.Source) {
+func (m *AddressMask) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &m.mask)
+	stateSourceObject.Load(1, &m.length)
+}
+
+func (s *Subnet) StateTypeName() string {
+	return "pkg/tcpip.Subnet"
+}
+
+func (s *Subnet) StateFields() []string {
+	return []string{
+		"address",
+		"mask",
+	}
+}
+
+func (s *Subnet) beforeSave() {}
+
+// +checklocksignore
+func (s *Subnet) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.address)
+	stateSinkObject.Save(1, &s.mask)
+}
+
+func (s *Subnet) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *Subnet) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.address)
+	stateSourceObject.Load(1, &s.mask)
 }
 
 func (f *FullAddress) StateTypeName() string {
@@ -1263,10 +1398,10 @@ func (f *FullAddress) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(3, &f.LinkAddr)
 }
 
-func (f *FullAddress) afterLoad() {}
+func (f *FullAddress) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (f *FullAddress) StateLoad(stateSourceObject state.Source) {
+func (f *FullAddress) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &f.NIC)
 	stateSourceObject.Load(1, &f.Addr)
 	stateSourceObject.Load(2, &f.Port)
@@ -1301,10 +1436,10 @@ func (s *SendableControlMessages) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(5, &s.IPv6PacketInfo)
 }
 
-func (s *SendableControlMessages) afterLoad() {}
+func (s *SendableControlMessages) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *SendableControlMessages) StateLoad(stateSourceObject state.Source) {
+func (s *SendableControlMessages) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &s.HasTTL)
 	stateSourceObject.Load(1, &s.TTL)
 	stateSourceObject.Load(2, &s.HasHopLimit)
@@ -1369,10 +1504,10 @@ func (c *ReceivableControlMessages) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(18, &c.SockErr)
 }
 
-func (c *ReceivableControlMessages) afterLoad() {}
+func (c *ReceivableControlMessages) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (c *ReceivableControlMessages) StateLoad(stateSourceObject state.Source) {
+func (c *ReceivableControlMessages) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(1, &c.HasInq)
 	stateSourceObject.Load(2, &c.Inq)
 	stateSourceObject.Load(3, &c.HasTOS)
@@ -1391,7 +1526,7 @@ func (c *ReceivableControlMessages) StateLoad(stateSourceObject state.Source) {
 	stateSourceObject.Load(16, &c.HasOriginalDstAddress)
 	stateSourceObject.Load(17, &c.OriginalDstAddress)
 	stateSourceObject.Load(18, &c.SockErr)
-	stateSourceObject.LoadValue(0, new(int64), func(y any) { c.loadTimestamp(y.(int64)) })
+	stateSourceObject.LoadValue(0, new(int64), func(y any) { c.loadTimestamp(ctx, y.(int64)) })
 }
 
 func (l *LinkPacketInfo) StateTypeName() string {
@@ -1414,14 +1549,76 @@ func (l *LinkPacketInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.PktType)
 }
 
-func (l *LinkPacketInfo) afterLoad() {}
+func (l *LinkPacketInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *LinkPacketInfo) StateLoad(stateSourceObject state.Source) {
+func (l *LinkPacketInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.Protocol)
 	stateSourceObject.Load(1, &l.PktType)
 }
 
+func (t *TCPSendBufferSizeRangeOption) StateTypeName() string {
+	return "pkg/tcpip.TCPSendBufferSizeRangeOption"
+}
+
+func (t *TCPSendBufferSizeRangeOption) StateFields() []string {
+	return []string{
+		"Min",
+		"Default",
+		"Max",
+	}
+}
+
+func (t *TCPSendBufferSizeRangeOption) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPSendBufferSizeRangeOption) StateSave(stateSinkObject state.Sink) {
+	t.beforeSave()
+	stateSinkObject.Save(0, &t.Min)
+	stateSinkObject.Save(1, &t.Default)
+	stateSinkObject.Save(2, &t.Max)
+}
+
+func (t *TCPSendBufferSizeRangeOption) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (t *TCPSendBufferSizeRangeOption) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &t.Min)
+	stateSourceObject.Load(1, &t.Default)
+	stateSourceObject.Load(2, &t.Max)
+}
+
+func (t *TCPReceiveBufferSizeRangeOption) StateTypeName() string {
+	return "pkg/tcpip.TCPReceiveBufferSizeRangeOption"
+}
+
+func (t *TCPReceiveBufferSizeRangeOption) StateFields() []string {
+	return []string{
+		"Min",
+		"Default",
+		"Max",
+	}
+}
+
+func (t *TCPReceiveBufferSizeRangeOption) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPReceiveBufferSizeRangeOption) StateSave(stateSinkObject state.Sink) {
+	t.beforeSave()
+	stateSinkObject.Save(0, &t.Min)
+	stateSinkObject.Save(1, &t.Default)
+	stateSinkObject.Save(2, &t.Max)
+}
+
+func (t *TCPReceiveBufferSizeRangeOption) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (t *TCPReceiveBufferSizeRangeOption) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &t.Min)
+	stateSourceObject.Load(1, &t.Default)
+	stateSourceObject.Load(2, &t.Max)
+}
+
 func (f *ICMPv6Filter) StateTypeName() string {
 	return "pkg/tcpip.ICMPv6Filter"
 }
@@ -1440,10 +1637,10 @@ func (f *ICMPv6Filter) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &f.DenyType)
 }
 
-func (f *ICMPv6Filter) afterLoad() {}
+func (f *ICMPv6Filter) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (f *ICMPv6Filter) StateLoad(stateSourceObject state.Source) {
+func (f *ICMPv6Filter) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &f.DenyType)
 }
 
@@ -1467,10 +1664,10 @@ func (l *LingerOption) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.Timeout)
 }
 
-func (l *LingerOption) afterLoad() {}
+func (l *LingerOption) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *LingerOption) StateLoad(stateSourceObject state.Source) {
+func (l *LingerOption) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.Enabled)
 	stateSourceObject.Load(1, &l.Timeout)
 }
@@ -1497,10 +1694,10 @@ func (i *IPPacketInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &i.DestinationAddr)
 }
 
-func (i *IPPacketInfo) afterLoad() {}
+func (i *IPPacketInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *IPPacketInfo) StateLoad(stateSourceObject state.Source) {
+func (i *IPPacketInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.NIC)
 	stateSourceObject.Load(1, &i.LocalAddr)
 	stateSourceObject.Load(2, &i.DestinationAddr)
@@ -1526,229 +1723,1466 @@ func (i *IPv6PacketInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &i.NIC)
 }
 
-func (i *IPv6PacketInfo) afterLoad() {}
+func (i *IPv6PacketInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (i *IPv6PacketInfo) StateLoad(stateSourceObject state.Source) {
+func (i *IPv6PacketInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &i.Addr)
 	stateSourceObject.Load(1, &i.NIC)
 }
 
-func (s *StatCounter) StateTypeName() string {
-	return "pkg/tcpip.StatCounter"
+func (s *SendBufferSizeOption) StateTypeName() string {
+	return "pkg/tcpip.SendBufferSizeOption"
 }
 
-func (s *StatCounter) StateFields() []string {
+func (s *SendBufferSizeOption) StateFields() []string {
 	return []string{
-		"count",
+		"Min",
+		"Default",
+		"Max",
 	}
 }
 
-func (s *StatCounter) beforeSave() {}
+func (s *SendBufferSizeOption) beforeSave() {}
 
 // +checklocksignore
-func (s *StatCounter) StateSave(stateSinkObject state.Sink) {
+func (s *SendBufferSizeOption) StateSave(stateSinkObject state.Sink) {
 	s.beforeSave()
-	stateSinkObject.Save(0, &s.count)
+	stateSinkObject.Save(0, &s.Min)
+	stateSinkObject.Save(1, &s.Default)
+	stateSinkObject.Save(2, &s.Max)
 }
 
-func (s *StatCounter) afterLoad() {}
+func (s *SendBufferSizeOption) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *StatCounter) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &s.count)
+func (s *SendBufferSizeOption) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.Min)
+	stateSourceObject.Load(1, &s.Default)
+	stateSourceObject.Load(2, &s.Max)
 }
 
-func (r *ReceiveErrors) StateTypeName() string {
-	return "pkg/tcpip.ReceiveErrors"
+func (r *ReceiveBufferSizeOption) StateTypeName() string {
+	return "pkg/tcpip.ReceiveBufferSizeOption"
 }
 
-func (r *ReceiveErrors) StateFields() []string {
+func (r *ReceiveBufferSizeOption) StateFields() []string {
 	return []string{
-		"ReceiveBufferOverflow",
-		"MalformedPacketsReceived",
-		"ClosedReceiver",
-		"ChecksumErrors",
+		"Min",
+		"Default",
+		"Max",
 	}
 }
 
-func (r *ReceiveErrors) beforeSave() {}
+func (r *ReceiveBufferSizeOption) beforeSave() {}
 
 // +checklocksignore
-func (r *ReceiveErrors) StateSave(stateSinkObject state.Sink) {
+func (r *ReceiveBufferSizeOption) StateSave(stateSinkObject state.Sink) {
 	r.beforeSave()
-	stateSinkObject.Save(0, &r.ReceiveBufferOverflow)
-	stateSinkObject.Save(1, &r.MalformedPacketsReceived)
-	stateSinkObject.Save(2, &r.ClosedReceiver)
-	stateSinkObject.Save(3, &r.ChecksumErrors)
+	stateSinkObject.Save(0, &r.Min)
+	stateSinkObject.Save(1, &r.Default)
+	stateSinkObject.Save(2, &r.Max)
 }
 
-func (r *ReceiveErrors) afterLoad() {}
+func (r *ReceiveBufferSizeOption) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *ReceiveErrors) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &r.ReceiveBufferOverflow)
-	stateSourceObject.Load(1, &r.MalformedPacketsReceived)
-	stateSourceObject.Load(2, &r.ClosedReceiver)
-	stateSourceObject.Load(3, &r.ChecksumErrors)
+func (r *ReceiveBufferSizeOption) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &r.Min)
+	stateSourceObject.Load(1, &r.Default)
+	stateSourceObject.Load(2, &r.Max)
 }
 
-func (s *SendErrors) StateTypeName() string {
-	return "pkg/tcpip.SendErrors"
+func (r *Route) StateTypeName() string {
+	return "pkg/tcpip.Route"
 }
 
-func (s *SendErrors) StateFields() []string {
+func (r *Route) StateFields() []string {
 	return []string{
-		"SendToNetworkFailed",
-		"NoRoute",
+		"RouteEntry",
+		"Destination",
+		"Gateway",
+		"NIC",
+		"SourceHint",
+		"MTU",
 	}
 }
 
-func (s *SendErrors) beforeSave() {}
+func (r *Route) beforeSave() {}
 
 // +checklocksignore
-func (s *SendErrors) StateSave(stateSinkObject state.Sink) {
+func (r *Route) StateSave(stateSinkObject state.Sink) {
+	r.beforeSave()
+	stateSinkObject.Save(0, &r.RouteEntry)
+	stateSinkObject.Save(1, &r.Destination)
+	stateSinkObject.Save(2, &r.Gateway)
+	stateSinkObject.Save(3, &r.NIC)
+	stateSinkObject.Save(4, &r.SourceHint)
+	stateSinkObject.Save(5, &r.MTU)
+}
+
+func (r *Route) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (r *Route) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &r.RouteEntry)
+	stateSourceObject.Load(1, &r.Destination)
+	stateSourceObject.Load(2, &r.Gateway)
+	stateSourceObject.Load(3, &r.NIC)
+	stateSourceObject.Load(4, &r.SourceHint)
+	stateSourceObject.Load(5, &r.MTU)
+}
+
+func (s *StatCounter) StateTypeName() string {
+	return "pkg/tcpip.StatCounter"
+}
+
+func (s *StatCounter) StateFields() []string {
+	return []string{
+		"count",
+	}
+}
+
+func (s *StatCounter) beforeSave() {}
+
+// +checklocksignore
+func (s *StatCounter) StateSave(stateSinkObject state.Sink) {
 	s.beforeSave()
-	stateSinkObject.Save(0, &s.SendToNetworkFailed)
-	stateSinkObject.Save(1, &s.NoRoute)
+	stateSinkObject.Save(0, &s.count)
 }
 
-func (s *SendErrors) afterLoad() {}
+func (s *StatCounter) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *SendErrors) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &s.SendToNetworkFailed)
-	stateSourceObject.Load(1, &s.NoRoute)
+func (s *StatCounter) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.count)
 }
 
-func (r *ReadErrors) StateTypeName() string {
-	return "pkg/tcpip.ReadErrors"
+func (m *MultiCounterStat) StateTypeName() string {
+	return "pkg/tcpip.MultiCounterStat"
 }
 
-func (r *ReadErrors) StateFields() []string {
+func (m *MultiCounterStat) StateFields() []string {
 	return []string{
-		"ReadClosed",
-		"InvalidEndpointState",
-		"NotConnected",
+		"a",
+		"b",
 	}
 }
 
-func (r *ReadErrors) beforeSave() {}
+func (m *MultiCounterStat) beforeSave() {}
 
 // +checklocksignore
-func (r *ReadErrors) StateSave(stateSinkObject state.Sink) {
-	r.beforeSave()
-	stateSinkObject.Save(0, &r.ReadClosed)
-	stateSinkObject.Save(1, &r.InvalidEndpointState)
-	stateSinkObject.Save(2, &r.NotConnected)
+func (m *MultiCounterStat) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.a)
+	stateSinkObject.Save(1, &m.b)
 }
 
-func (r *ReadErrors) afterLoad() {}
+func (m *MultiCounterStat) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *ReadErrors) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &r.ReadClosed)
-	stateSourceObject.Load(1, &r.InvalidEndpointState)
-	stateSourceObject.Load(2, &r.NotConnected)
+func (m *MultiCounterStat) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.a)
+	stateSourceObject.Load(1, &m.b)
 }
 
-func (w *WriteErrors) StateTypeName() string {
-	return "pkg/tcpip.WriteErrors"
+func (i *ICMPv4PacketStats) StateTypeName() string {
+	return "pkg/tcpip.ICMPv4PacketStats"
 }
 
-func (w *WriteErrors) StateFields() []string {
+func (i *ICMPv4PacketStats) StateFields() []string {
 	return []string{
-		"WriteClosed",
-		"InvalidEndpointState",
-		"InvalidArgs",
+		"EchoRequest",
+		"EchoReply",
+		"DstUnreachable",
+		"SrcQuench",
+		"Redirect",
+		"TimeExceeded",
+		"ParamProblem",
+		"Timestamp",
+		"TimestampReply",
+		"InfoRequest",
+		"InfoReply",
 	}
 }
 
-func (w *WriteErrors) beforeSave() {}
+func (i *ICMPv4PacketStats) beforeSave() {}
 
 // +checklocksignore
-func (w *WriteErrors) StateSave(stateSinkObject state.Sink) {
-	w.beforeSave()
-	stateSinkObject.Save(0, &w.WriteClosed)
-	stateSinkObject.Save(1, &w.InvalidEndpointState)
-	stateSinkObject.Save(2, &w.InvalidArgs)
+func (i *ICMPv4PacketStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.EchoRequest)
+	stateSinkObject.Save(1, &i.EchoReply)
+	stateSinkObject.Save(2, &i.DstUnreachable)
+	stateSinkObject.Save(3, &i.SrcQuench)
+	stateSinkObject.Save(4, &i.Redirect)
+	stateSinkObject.Save(5, &i.TimeExceeded)
+	stateSinkObject.Save(6, &i.ParamProblem)
+	stateSinkObject.Save(7, &i.Timestamp)
+	stateSinkObject.Save(8, &i.TimestampReply)
+	stateSinkObject.Save(9, &i.InfoRequest)
+	stateSinkObject.Save(10, &i.InfoReply)
 }
 
-func (w *WriteErrors) afterLoad() {}
+func (i *ICMPv4PacketStats) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (w *WriteErrors) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &w.WriteClosed)
-	stateSourceObject.Load(1, &w.InvalidEndpointState)
-	stateSourceObject.Load(2, &w.InvalidArgs)
+func (i *ICMPv4PacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.EchoRequest)
+	stateSourceObject.Load(1, &i.EchoReply)
+	stateSourceObject.Load(2, &i.DstUnreachable)
+	stateSourceObject.Load(3, &i.SrcQuench)
+	stateSourceObject.Load(4, &i.Redirect)
+	stateSourceObject.Load(5, &i.TimeExceeded)
+	stateSourceObject.Load(6, &i.ParamProblem)
+	stateSourceObject.Load(7, &i.Timestamp)
+	stateSourceObject.Load(8, &i.TimestampReply)
+	stateSourceObject.Load(9, &i.InfoRequest)
+	stateSourceObject.Load(10, &i.InfoReply)
 }
 
-func (src *TransportEndpointStats) StateTypeName() string {
-	return "pkg/tcpip.TransportEndpointStats"
+func (i *ICMPv4SentPacketStats) StateTypeName() string {
+	return "pkg/tcpip.ICMPv4SentPacketStats"
 }
 
-func (src *TransportEndpointStats) StateFields() []string {
+func (i *ICMPv4SentPacketStats) StateFields() []string {
 	return []string{
-		"PacketsReceived",
-		"PacketsSent",
-		"ReceiveErrors",
-		"ReadErrors",
-		"SendErrors",
-		"WriteErrors",
+		"ICMPv4PacketStats",
+		"Dropped",
+		"RateLimited",
 	}
 }
 
-func (src *TransportEndpointStats) beforeSave() {}
+func (i *ICMPv4SentPacketStats) beforeSave() {}
 
 // +checklocksignore
-func (src *TransportEndpointStats) StateSave(stateSinkObject state.Sink) {
-	src.beforeSave()
-	stateSinkObject.Save(0, &src.PacketsReceived)
-	stateSinkObject.Save(1, &src.PacketsSent)
-	stateSinkObject.Save(2, &src.ReceiveErrors)
-	stateSinkObject.Save(3, &src.ReadErrors)
-	stateSinkObject.Save(4, &src.SendErrors)
-	stateSinkObject.Save(5, &src.WriteErrors)
+func (i *ICMPv4SentPacketStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.ICMPv4PacketStats)
+	stateSinkObject.Save(1, &i.Dropped)
+	stateSinkObject.Save(2, &i.RateLimited)
 }
 
-func (src *TransportEndpointStats) afterLoad() {}
+func (i *ICMPv4SentPacketStats) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (src *TransportEndpointStats) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &src.PacketsReceived)
-	stateSourceObject.Load(1, &src.PacketsSent)
-	stateSourceObject.Load(2, &src.ReceiveErrors)
-	stateSourceObject.Load(3, &src.ReadErrors)
-	stateSourceObject.Load(4, &src.SendErrors)
-	stateSourceObject.Load(5, &src.WriteErrors)
+func (i *ICMPv4SentPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.ICMPv4PacketStats)
+	stateSourceObject.Load(1, &i.Dropped)
+	stateSourceObject.Load(2, &i.RateLimited)
 }
 
-func (a *AddressWithPrefix) StateTypeName() string {
-	return "pkg/tcpip.AddressWithPrefix"
+func (i *ICMPv4ReceivedPacketStats) StateTypeName() string {
+	return "pkg/tcpip.ICMPv4ReceivedPacketStats"
 }
 
-func (a *AddressWithPrefix) StateFields() []string {
+func (i *ICMPv4ReceivedPacketStats) StateFields() []string {
 	return []string{
-		"Address",
-		"PrefixLen",
+		"ICMPv4PacketStats",
+		"Invalid",
 	}
 }
 
-func (a *AddressWithPrefix) beforeSave() {}
+func (i *ICMPv4ReceivedPacketStats) beforeSave() {}
 
 // +checklocksignore
-func (a *AddressWithPrefix) StateSave(stateSinkObject state.Sink) {
-	a.beforeSave()
-	stateSinkObject.Save(0, &a.Address)
-	stateSinkObject.Save(1, &a.PrefixLen)
+func (i *ICMPv4ReceivedPacketStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.ICMPv4PacketStats)
+	stateSinkObject.Save(1, &i.Invalid)
 }
 
-func (a *AddressWithPrefix) afterLoad() {}
+func (i *ICMPv4ReceivedPacketStats) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (a *AddressWithPrefix) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &a.Address)
-	stateSourceObject.Load(1, &a.PrefixLen)
+func (i *ICMPv4ReceivedPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.ICMPv4PacketStats)
+	stateSourceObject.Load(1, &i.Invalid)
+}
+
+func (i *ICMPv4Stats) StateTypeName() string {
+	return "pkg/tcpip.ICMPv4Stats"
+}
+
+func (i *ICMPv4Stats) StateFields() []string {
+	return []string{
+		"PacketsSent",
+		"PacketsReceived",
+	}
+}
+
+func (i *ICMPv4Stats) beforeSave() {}
+
+// +checklocksignore
+func (i *ICMPv4Stats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.PacketsSent)
+	stateSinkObject.Save(1, &i.PacketsReceived)
+}
+
+func (i *ICMPv4Stats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *ICMPv4Stats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.PacketsSent)
+	stateSourceObject.Load(1, &i.PacketsReceived)
+}
+
+func (i *ICMPv6PacketStats) StateTypeName() string {
+	return "pkg/tcpip.ICMPv6PacketStats"
+}
+
+func (i *ICMPv6PacketStats) StateFields() []string {
+	return []string{
+		"EchoRequest",
+		"EchoReply",
+		"DstUnreachable",
+		"PacketTooBig",
+		"TimeExceeded",
+		"ParamProblem",
+		"RouterSolicit",
+		"RouterAdvert",
+		"NeighborSolicit",
+		"NeighborAdvert",
+		"RedirectMsg",
+		"MulticastListenerQuery",
+		"MulticastListenerReport",
+		"MulticastListenerReportV2",
+		"MulticastListenerDone",
+	}
+}
+
+func (i *ICMPv6PacketStats) beforeSave() {}
+
+// +checklocksignore
+func (i *ICMPv6PacketStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.EchoRequest)
+	stateSinkObject.Save(1, &i.EchoReply)
+	stateSinkObject.Save(2, &i.DstUnreachable)
+	stateSinkObject.Save(3, &i.PacketTooBig)
+	stateSinkObject.Save(4, &i.TimeExceeded)
+	stateSinkObject.Save(5, &i.ParamProblem)
+	stateSinkObject.Save(6, &i.RouterSolicit)
+	stateSinkObject.Save(7, &i.RouterAdvert)
+	stateSinkObject.Save(8, &i.NeighborSolicit)
+	stateSinkObject.Save(9, &i.NeighborAdvert)
+	stateSinkObject.Save(10, &i.RedirectMsg)
+	stateSinkObject.Save(11, &i.MulticastListenerQuery)
+	stateSinkObject.Save(12, &i.MulticastListenerReport)
+	stateSinkObject.Save(13, &i.MulticastListenerReportV2)
+	stateSinkObject.Save(14, &i.MulticastListenerDone)
+}
+
+func (i *ICMPv6PacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *ICMPv6PacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.EchoRequest)
+	stateSourceObject.Load(1, &i.EchoReply)
+	stateSourceObject.Load(2, &i.DstUnreachable)
+	stateSourceObject.Load(3, &i.PacketTooBig)
+	stateSourceObject.Load(4, &i.TimeExceeded)
+	stateSourceObject.Load(5, &i.ParamProblem)
+	stateSourceObject.Load(6, &i.RouterSolicit)
+	stateSourceObject.Load(7, &i.RouterAdvert)
+	stateSourceObject.Load(8, &i.NeighborSolicit)
+	stateSourceObject.Load(9, &i.NeighborAdvert)
+	stateSourceObject.Load(10, &i.RedirectMsg)
+	stateSourceObject.Load(11, &i.MulticastListenerQuery)
+	stateSourceObject.Load(12, &i.MulticastListenerReport)
+	stateSourceObject.Load(13, &i.MulticastListenerReportV2)
+	stateSourceObject.Load(14, &i.MulticastListenerDone)
+}
+
+func (i *ICMPv6SentPacketStats) StateTypeName() string {
+	return "pkg/tcpip.ICMPv6SentPacketStats"
+}
+
+func (i *ICMPv6SentPacketStats) StateFields() []string {
+	return []string{
+		"ICMPv6PacketStats",
+		"Dropped",
+		"RateLimited",
+	}
+}
+
+func (i *ICMPv6SentPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (i *ICMPv6SentPacketStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.ICMPv6PacketStats)
+	stateSinkObject.Save(1, &i.Dropped)
+	stateSinkObject.Save(2, &i.RateLimited)
+}
+
+func (i *ICMPv6SentPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *ICMPv6SentPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.ICMPv6PacketStats)
+	stateSourceObject.Load(1, &i.Dropped)
+	stateSourceObject.Load(2, &i.RateLimited)
+}
+
+func (i *ICMPv6ReceivedPacketStats) StateTypeName() string {
+	return "pkg/tcpip.ICMPv6ReceivedPacketStats"
+}
+
+func (i *ICMPv6ReceivedPacketStats) StateFields() []string {
+	return []string{
+		"ICMPv6PacketStats",
+		"Unrecognized",
+		"Invalid",
+		"RouterOnlyPacketsDroppedByHost",
+	}
+}
+
+func (i *ICMPv6ReceivedPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (i *ICMPv6ReceivedPacketStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.ICMPv6PacketStats)
+	stateSinkObject.Save(1, &i.Unrecognized)
+	stateSinkObject.Save(2, &i.Invalid)
+	stateSinkObject.Save(3, &i.RouterOnlyPacketsDroppedByHost)
+}
+
+func (i *ICMPv6ReceivedPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *ICMPv6ReceivedPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.ICMPv6PacketStats)
+	stateSourceObject.Load(1, &i.Unrecognized)
+	stateSourceObject.Load(2, &i.Invalid)
+	stateSourceObject.Load(3, &i.RouterOnlyPacketsDroppedByHost)
+}
+
+func (i *ICMPv6Stats) StateTypeName() string {
+	return "pkg/tcpip.ICMPv6Stats"
+}
+
+func (i *ICMPv6Stats) StateFields() []string {
+	return []string{
+		"PacketsSent",
+		"PacketsReceived",
+	}
+}
+
+func (i *ICMPv6Stats) beforeSave() {}
+
+// +checklocksignore
+func (i *ICMPv6Stats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.PacketsSent)
+	stateSinkObject.Save(1, &i.PacketsReceived)
+}
+
+func (i *ICMPv6Stats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *ICMPv6Stats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.PacketsSent)
+	stateSourceObject.Load(1, &i.PacketsReceived)
+}
+
+func (i *ICMPStats) StateTypeName() string {
+	return "pkg/tcpip.ICMPStats"
+}
+
+func (i *ICMPStats) StateFields() []string {
+	return []string{
+		"V4",
+		"V6",
+	}
+}
+
+func (i *ICMPStats) beforeSave() {}
+
+// +checklocksignore
+func (i *ICMPStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.V4)
+	stateSinkObject.Save(1, &i.V6)
+}
+
+func (i *ICMPStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *ICMPStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.V4)
+	stateSourceObject.Load(1, &i.V6)
+}
+
+func (i *IGMPPacketStats) StateTypeName() string {
+	return "pkg/tcpip.IGMPPacketStats"
+}
+
+func (i *IGMPPacketStats) StateFields() []string {
+	return []string{
+		"MembershipQuery",
+		"V1MembershipReport",
+		"V2MembershipReport",
+		"V3MembershipReport",
+		"LeaveGroup",
+	}
+}
+
+func (i *IGMPPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (i *IGMPPacketStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.MembershipQuery)
+	stateSinkObject.Save(1, &i.V1MembershipReport)
+	stateSinkObject.Save(2, &i.V2MembershipReport)
+	stateSinkObject.Save(3, &i.V3MembershipReport)
+	stateSinkObject.Save(4, &i.LeaveGroup)
+}
+
+func (i *IGMPPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *IGMPPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.MembershipQuery)
+	stateSourceObject.Load(1, &i.V1MembershipReport)
+	stateSourceObject.Load(2, &i.V2MembershipReport)
+	stateSourceObject.Load(3, &i.V3MembershipReport)
+	stateSourceObject.Load(4, &i.LeaveGroup)
+}
+
+func (i *IGMPSentPacketStats) StateTypeName() string {
+	return "pkg/tcpip.IGMPSentPacketStats"
+}
+
+func (i *IGMPSentPacketStats) StateFields() []string {
+	return []string{
+		"IGMPPacketStats",
+		"Dropped",
+	}
+}
+
+func (i *IGMPSentPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (i *IGMPSentPacketStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.IGMPPacketStats)
+	stateSinkObject.Save(1, &i.Dropped)
+}
+
+func (i *IGMPSentPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *IGMPSentPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.IGMPPacketStats)
+	stateSourceObject.Load(1, &i.Dropped)
+}
+
+func (i *IGMPReceivedPacketStats) StateTypeName() string {
+	return "pkg/tcpip.IGMPReceivedPacketStats"
+}
+
+func (i *IGMPReceivedPacketStats) StateFields() []string {
+	return []string{
+		"IGMPPacketStats",
+		"Invalid",
+		"ChecksumErrors",
+		"Unrecognized",
+	}
+}
+
+func (i *IGMPReceivedPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (i *IGMPReceivedPacketStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.IGMPPacketStats)
+	stateSinkObject.Save(1, &i.Invalid)
+	stateSinkObject.Save(2, &i.ChecksumErrors)
+	stateSinkObject.Save(3, &i.Unrecognized)
+}
+
+func (i *IGMPReceivedPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *IGMPReceivedPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.IGMPPacketStats)
+	stateSourceObject.Load(1, &i.Invalid)
+	stateSourceObject.Load(2, &i.ChecksumErrors)
+	stateSourceObject.Load(3, &i.Unrecognized)
+}
+
+func (i *IGMPStats) StateTypeName() string {
+	return "pkg/tcpip.IGMPStats"
+}
+
+func (i *IGMPStats) StateFields() []string {
+	return []string{
+		"PacketsSent",
+		"PacketsReceived",
+	}
+}
+
+func (i *IGMPStats) beforeSave() {}
+
+// +checklocksignore
+func (i *IGMPStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.PacketsSent)
+	stateSinkObject.Save(1, &i.PacketsReceived)
+}
+
+func (i *IGMPStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *IGMPStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.PacketsSent)
+	stateSourceObject.Load(1, &i.PacketsReceived)
+}
+
+func (i *IPForwardingStats) StateTypeName() string {
+	return "pkg/tcpip.IPForwardingStats"
+}
+
+func (i *IPForwardingStats) StateFields() []string {
+	return []string{
+		"Unrouteable",
+		"ExhaustedTTL",
+		"InitializingSource",
+		"LinkLocalSource",
+		"LinkLocalDestination",
+		"PacketTooBig",
+		"HostUnreachable",
+		"ExtensionHeaderProblem",
+		"UnexpectedMulticastInputInterface",
+		"UnknownOutputEndpoint",
+		"NoMulticastPendingQueueBufferSpace",
+		"OutgoingDeviceNoBufferSpace",
+		"Errors",
+	}
+}
+
+func (i *IPForwardingStats) beforeSave() {}
+
+// +checklocksignore
+func (i *IPForwardingStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.Unrouteable)
+	stateSinkObject.Save(1, &i.ExhaustedTTL)
+	stateSinkObject.Save(2, &i.InitializingSource)
+	stateSinkObject.Save(3, &i.LinkLocalSource)
+	stateSinkObject.Save(4, &i.LinkLocalDestination)
+	stateSinkObject.Save(5, &i.PacketTooBig)
+	stateSinkObject.Save(6, &i.HostUnreachable)
+	stateSinkObject.Save(7, &i.ExtensionHeaderProblem)
+	stateSinkObject.Save(8, &i.UnexpectedMulticastInputInterface)
+	stateSinkObject.Save(9, &i.UnknownOutputEndpoint)
+	stateSinkObject.Save(10, &i.NoMulticastPendingQueueBufferSpace)
+	stateSinkObject.Save(11, &i.OutgoingDeviceNoBufferSpace)
+	stateSinkObject.Save(12, &i.Errors)
+}
+
+func (i *IPForwardingStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *IPForwardingStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.Unrouteable)
+	stateSourceObject.Load(1, &i.ExhaustedTTL)
+	stateSourceObject.Load(2, &i.InitializingSource)
+	stateSourceObject.Load(3, &i.LinkLocalSource)
+	stateSourceObject.Load(4, &i.LinkLocalDestination)
+	stateSourceObject.Load(5, &i.PacketTooBig)
+	stateSourceObject.Load(6, &i.HostUnreachable)
+	stateSourceObject.Load(7, &i.ExtensionHeaderProblem)
+	stateSourceObject.Load(8, &i.UnexpectedMulticastInputInterface)
+	stateSourceObject.Load(9, &i.UnknownOutputEndpoint)
+	stateSourceObject.Load(10, &i.NoMulticastPendingQueueBufferSpace)
+	stateSourceObject.Load(11, &i.OutgoingDeviceNoBufferSpace)
+	stateSourceObject.Load(12, &i.Errors)
+}
+
+func (i *IPStats) StateTypeName() string {
+	return "pkg/tcpip.IPStats"
+}
+
+func (i *IPStats) StateFields() []string {
+	return []string{
+		"PacketsReceived",
+		"ValidPacketsReceived",
+		"DisabledPacketsReceived",
+		"InvalidDestinationAddressesReceived",
+		"InvalidSourceAddressesReceived",
+		"PacketsDelivered",
+		"PacketsSent",
+		"OutgoingPacketErrors",
+		"MalformedPacketsReceived",
+		"MalformedFragmentsReceived",
+		"IPTablesPreroutingDropped",
+		"IPTablesInputDropped",
+		"IPTablesForwardDropped",
+		"IPTablesOutputDropped",
+		"IPTablesPostroutingDropped",
+		"OptionTimestampReceived",
+		"OptionRecordRouteReceived",
+		"OptionRouterAlertReceived",
+		"OptionUnknownReceived",
+		"Forwarding",
+	}
+}
+
+func (i *IPStats) beforeSave() {}
+
+// +checklocksignore
+func (i *IPStats) StateSave(stateSinkObject state.Sink) {
+	i.beforeSave()
+	stateSinkObject.Save(0, &i.PacketsReceived)
+	stateSinkObject.Save(1, &i.ValidPacketsReceived)
+	stateSinkObject.Save(2, &i.DisabledPacketsReceived)
+	stateSinkObject.Save(3, &i.InvalidDestinationAddressesReceived)
+	stateSinkObject.Save(4, &i.InvalidSourceAddressesReceived)
+	stateSinkObject.Save(5, &i.PacketsDelivered)
+	stateSinkObject.Save(6, &i.PacketsSent)
+	stateSinkObject.Save(7, &i.OutgoingPacketErrors)
+	stateSinkObject.Save(8, &i.MalformedPacketsReceived)
+	stateSinkObject.Save(9, &i.MalformedFragmentsReceived)
+	stateSinkObject.Save(10, &i.IPTablesPreroutingDropped)
+	stateSinkObject.Save(11, &i.IPTablesInputDropped)
+	stateSinkObject.Save(12, &i.IPTablesForwardDropped)
+	stateSinkObject.Save(13, &i.IPTablesOutputDropped)
+	stateSinkObject.Save(14, &i.IPTablesPostroutingDropped)
+	stateSinkObject.Save(15, &i.OptionTimestampReceived)
+	stateSinkObject.Save(16, &i.OptionRecordRouteReceived)
+	stateSinkObject.Save(17, &i.OptionRouterAlertReceived)
+	stateSinkObject.Save(18, &i.OptionUnknownReceived)
+	stateSinkObject.Save(19, &i.Forwarding)
+}
+
+func (i *IPStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (i *IPStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &i.PacketsReceived)
+	stateSourceObject.Load(1, &i.ValidPacketsReceived)
+	stateSourceObject.Load(2, &i.DisabledPacketsReceived)
+	stateSourceObject.Load(3, &i.InvalidDestinationAddressesReceived)
+	stateSourceObject.Load(4, &i.InvalidSourceAddressesReceived)
+	stateSourceObject.Load(5, &i.PacketsDelivered)
+	stateSourceObject.Load(6, &i.PacketsSent)
+	stateSourceObject.Load(7, &i.OutgoingPacketErrors)
+	stateSourceObject.Load(8, &i.MalformedPacketsReceived)
+	stateSourceObject.Load(9, &i.MalformedFragmentsReceived)
+	stateSourceObject.Load(10, &i.IPTablesPreroutingDropped)
+	stateSourceObject.Load(11, &i.IPTablesInputDropped)
+	stateSourceObject.Load(12, &i.IPTablesForwardDropped)
+	stateSourceObject.Load(13, &i.IPTablesOutputDropped)
+	stateSourceObject.Load(14, &i.IPTablesPostroutingDropped)
+	stateSourceObject.Load(15, &i.OptionTimestampReceived)
+	stateSourceObject.Load(16, &i.OptionRecordRouteReceived)
+	stateSourceObject.Load(17, &i.OptionRouterAlertReceived)
+	stateSourceObject.Load(18, &i.OptionUnknownReceived)
+	stateSourceObject.Load(19, &i.Forwarding)
+}
+
+func (a *ARPStats) StateTypeName() string {
+	return "pkg/tcpip.ARPStats"
+}
+
+func (a *ARPStats) StateFields() []string {
+	return []string{
+		"PacketsReceived",
+		"DisabledPacketsReceived",
+		"MalformedPacketsReceived",
+		"RequestsReceived",
+		"RequestsReceivedUnknownTargetAddress",
+		"OutgoingRequestInterfaceHasNoLocalAddressErrors",
+		"OutgoingRequestBadLocalAddressErrors",
+		"OutgoingRequestsDropped",
+		"OutgoingRequestsSent",
+		"RepliesReceived",
+		"OutgoingRepliesDropped",
+		"OutgoingRepliesSent",
+	}
+}
+
+func (a *ARPStats) beforeSave() {}
+
+// +checklocksignore
+func (a *ARPStats) StateSave(stateSinkObject state.Sink) {
+	a.beforeSave()
+	stateSinkObject.Save(0, &a.PacketsReceived)
+	stateSinkObject.Save(1, &a.DisabledPacketsReceived)
+	stateSinkObject.Save(2, &a.MalformedPacketsReceived)
+	stateSinkObject.Save(3, &a.RequestsReceived)
+	stateSinkObject.Save(4, &a.RequestsReceivedUnknownTargetAddress)
+	stateSinkObject.Save(5, &a.OutgoingRequestInterfaceHasNoLocalAddressErrors)
+	stateSinkObject.Save(6, &a.OutgoingRequestBadLocalAddressErrors)
+	stateSinkObject.Save(7, &a.OutgoingRequestsDropped)
+	stateSinkObject.Save(8, &a.OutgoingRequestsSent)
+	stateSinkObject.Save(9, &a.RepliesReceived)
+	stateSinkObject.Save(10, &a.OutgoingRepliesDropped)
+	stateSinkObject.Save(11, &a.OutgoingRepliesSent)
+}
+
+func (a *ARPStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (a *ARPStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &a.PacketsReceived)
+	stateSourceObject.Load(1, &a.DisabledPacketsReceived)
+	stateSourceObject.Load(2, &a.MalformedPacketsReceived)
+	stateSourceObject.Load(3, &a.RequestsReceived)
+	stateSourceObject.Load(4, &a.RequestsReceivedUnknownTargetAddress)
+	stateSourceObject.Load(5, &a.OutgoingRequestInterfaceHasNoLocalAddressErrors)
+	stateSourceObject.Load(6, &a.OutgoingRequestBadLocalAddressErrors)
+	stateSourceObject.Load(7, &a.OutgoingRequestsDropped)
+	stateSourceObject.Load(8, &a.OutgoingRequestsSent)
+	stateSourceObject.Load(9, &a.RepliesReceived)
+	stateSourceObject.Load(10, &a.OutgoingRepliesDropped)
+	stateSourceObject.Load(11, &a.OutgoingRepliesSent)
+}
+
+func (t *TCPStats) StateTypeName() string {
+	return "pkg/tcpip.TCPStats"
+}
+
+func (t *TCPStats) StateFields() []string {
+	return []string{
+		"ActiveConnectionOpenings",
+		"PassiveConnectionOpenings",
+		"CurrentEstablished",
+		"CurrentConnected",
+		"EstablishedResets",
+		"EstablishedClosed",
+		"EstablishedTimedout",
+		"ListenOverflowSynDrop",
+		"ListenOverflowAckDrop",
+		"ListenOverflowSynCookieSent",
+		"ListenOverflowSynCookieRcvd",
+		"ListenOverflowInvalidSynCookieRcvd",
+		"FailedConnectionAttempts",
+		"ValidSegmentsReceived",
+		"InvalidSegmentsReceived",
+		"SegmentsSent",
+		"SegmentSendErrors",
+		"ResetsSent",
+		"ResetsReceived",
+		"Retransmits",
+		"FastRecovery",
+		"SACKRecovery",
+		"TLPRecovery",
+		"SlowStartRetransmits",
+		"FastRetransmit",
+		"Timeouts",
+		"ChecksumErrors",
+		"FailedPortReservations",
+		"SegmentsAckedWithDSACK",
+		"SpuriousRecovery",
+		"SpuriousRTORecovery",
+		"ForwardMaxInFlightDrop",
+	}
+}
+
+func (t *TCPStats) beforeSave() {}
+
+// +checklocksignore
+func (t *TCPStats) StateSave(stateSinkObject state.Sink) {
+	t.beforeSave()
+	stateSinkObject.Save(0, &t.ActiveConnectionOpenings)
+	stateSinkObject.Save(1, &t.PassiveConnectionOpenings)
+	stateSinkObject.Save(2, &t.CurrentEstablished)
+	stateSinkObject.Save(3, &t.CurrentConnected)
+	stateSinkObject.Save(4, &t.EstablishedResets)
+	stateSinkObject.Save(5, &t.EstablishedClosed)
+	stateSinkObject.Save(6, &t.EstablishedTimedout)
+	stateSinkObject.Save(7, &t.ListenOverflowSynDrop)
+	stateSinkObject.Save(8, &t.ListenOverflowAckDrop)
+	stateSinkObject.Save(9, &t.ListenOverflowSynCookieSent)
+	stateSinkObject.Save(10, &t.ListenOverflowSynCookieRcvd)
+	stateSinkObject.Save(11, &t.ListenOverflowInvalidSynCookieRcvd)
+	stateSinkObject.Save(12, &t.FailedConnectionAttempts)
+	stateSinkObject.Save(13, &t.ValidSegmentsReceived)
+	stateSinkObject.Save(14, &t.InvalidSegmentsReceived)
+	stateSinkObject.Save(15, &t.SegmentsSent)
+	stateSinkObject.Save(16, &t.SegmentSendErrors)
+	stateSinkObject.Save(17, &t.ResetsSent)
+	stateSinkObject.Save(18, &t.ResetsReceived)
+	stateSinkObject.Save(19, &t.Retransmits)
+	stateSinkObject.Save(20, &t.FastRecovery)
+	stateSinkObject.Save(21, &t.SACKRecovery)
+	stateSinkObject.Save(22, &t.TLPRecovery)
+	stateSinkObject.Save(23, &t.SlowStartRetransmits)
+	stateSinkObject.Save(24, &t.FastRetransmit)
+	stateSinkObject.Save(25, &t.Timeouts)
+	stateSinkObject.Save(26, &t.ChecksumErrors)
+	stateSinkObject.Save(27, &t.FailedPortReservations)
+	stateSinkObject.Save(28, &t.SegmentsAckedWithDSACK)
+	stateSinkObject.Save(29, &t.SpuriousRecovery)
+	stateSinkObject.Save(30, &t.SpuriousRTORecovery)
+	stateSinkObject.Save(31, &t.ForwardMaxInFlightDrop)
+}
+
+func (t *TCPStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (t *TCPStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &t.ActiveConnectionOpenings)
+	stateSourceObject.Load(1, &t.PassiveConnectionOpenings)
+	stateSourceObject.Load(2, &t.CurrentEstablished)
+	stateSourceObject.Load(3, &t.CurrentConnected)
+	stateSourceObject.Load(4, &t.EstablishedResets)
+	stateSourceObject.Load(5, &t.EstablishedClosed)
+	stateSourceObject.Load(6, &t.EstablishedTimedout)
+	stateSourceObject.Load(7, &t.ListenOverflowSynDrop)
+	stateSourceObject.Load(8, &t.ListenOverflowAckDrop)
+	stateSourceObject.Load(9, &t.ListenOverflowSynCookieSent)
+	stateSourceObject.Load(10, &t.ListenOverflowSynCookieRcvd)
+	stateSourceObject.Load(11, &t.ListenOverflowInvalidSynCookieRcvd)
+	stateSourceObject.Load(12, &t.FailedConnectionAttempts)
+	stateSourceObject.Load(13, &t.ValidSegmentsReceived)
+	stateSourceObject.Load(14, &t.InvalidSegmentsReceived)
+	stateSourceObject.Load(15, &t.SegmentsSent)
+	stateSourceObject.Load(16, &t.SegmentSendErrors)
+	stateSourceObject.Load(17, &t.ResetsSent)
+	stateSourceObject.Load(18, &t.ResetsReceived)
+	stateSourceObject.Load(19, &t.Retransmits)
+	stateSourceObject.Load(20, &t.FastRecovery)
+	stateSourceObject.Load(21, &t.SACKRecovery)
+	stateSourceObject.Load(22, &t.TLPRecovery)
+	stateSourceObject.Load(23, &t.SlowStartRetransmits)
+	stateSourceObject.Load(24, &t.FastRetransmit)
+	stateSourceObject.Load(25, &t.Timeouts)
+	stateSourceObject.Load(26, &t.ChecksumErrors)
+	stateSourceObject.Load(27, &t.FailedPortReservations)
+	stateSourceObject.Load(28, &t.SegmentsAckedWithDSACK)
+	stateSourceObject.Load(29, &t.SpuriousRecovery)
+	stateSourceObject.Load(30, &t.SpuriousRTORecovery)
+	stateSourceObject.Load(31, &t.ForwardMaxInFlightDrop)
+}
+
+func (u *UDPStats) StateTypeName() string {
+	return "pkg/tcpip.UDPStats"
+}
+
+func (u *UDPStats) StateFields() []string {
+	return []string{
+		"PacketsReceived",
+		"UnknownPortErrors",
+		"ReceiveBufferErrors",
+		"MalformedPacketsReceived",
+		"PacketsSent",
+		"PacketSendErrors",
+		"ChecksumErrors",
+	}
+}
+
+func (u *UDPStats) beforeSave() {}
+
+// +checklocksignore
+func (u *UDPStats) StateSave(stateSinkObject state.Sink) {
+	u.beforeSave()
+	stateSinkObject.Save(0, &u.PacketsReceived)
+	stateSinkObject.Save(1, &u.UnknownPortErrors)
+	stateSinkObject.Save(2, &u.ReceiveBufferErrors)
+	stateSinkObject.Save(3, &u.MalformedPacketsReceived)
+	stateSinkObject.Save(4, &u.PacketsSent)
+	stateSinkObject.Save(5, &u.PacketSendErrors)
+	stateSinkObject.Save(6, &u.ChecksumErrors)
+}
+
+func (u *UDPStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (u *UDPStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &u.PacketsReceived)
+	stateSourceObject.Load(1, &u.UnknownPortErrors)
+	stateSourceObject.Load(2, &u.ReceiveBufferErrors)
+	stateSourceObject.Load(3, &u.MalformedPacketsReceived)
+	stateSourceObject.Load(4, &u.PacketsSent)
+	stateSourceObject.Load(5, &u.PacketSendErrors)
+	stateSourceObject.Load(6, &u.ChecksumErrors)
+}
+
+func (n *NICNeighborStats) StateTypeName() string {
+	return "pkg/tcpip.NICNeighborStats"
+}
+
+func (n *NICNeighborStats) StateFields() []string {
+	return []string{
+		"UnreachableEntryLookups",
+		"DroppedConfirmationForNoninitiatedNeighbor",
+		"DroppedInvalidLinkAddressConfirmations",
+	}
+}
+
+func (n *NICNeighborStats) beforeSave() {}
+
+// +checklocksignore
+func (n *NICNeighborStats) StateSave(stateSinkObject state.Sink) {
+	n.beforeSave()
+	stateSinkObject.Save(0, &n.UnreachableEntryLookups)
+	stateSinkObject.Save(1, &n.DroppedConfirmationForNoninitiatedNeighbor)
+	stateSinkObject.Save(2, &n.DroppedInvalidLinkAddressConfirmations)
+}
+
+func (n *NICNeighborStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (n *NICNeighborStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &n.UnreachableEntryLookups)
+	stateSourceObject.Load(1, &n.DroppedConfirmationForNoninitiatedNeighbor)
+	stateSourceObject.Load(2, &n.DroppedInvalidLinkAddressConfirmations)
+}
+
+func (n *NICPacketStats) StateTypeName() string {
+	return "pkg/tcpip.NICPacketStats"
+}
+
+func (n *NICPacketStats) StateFields() []string {
+	return []string{
+		"Packets",
+		"Bytes",
+	}
+}
+
+func (n *NICPacketStats) beforeSave() {}
+
+// +checklocksignore
+func (n *NICPacketStats) StateSave(stateSinkObject state.Sink) {
+	n.beforeSave()
+	stateSinkObject.Save(0, &n.Packets)
+	stateSinkObject.Save(1, &n.Bytes)
+}
+
+func (n *NICPacketStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (n *NICPacketStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &n.Packets)
+	stateSourceObject.Load(1, &n.Bytes)
+}
+
+func (m *IntegralStatCounterMap) StateTypeName() string {
+	return "pkg/tcpip.IntegralStatCounterMap"
+}
+
+func (m *IntegralStatCounterMap) StateFields() []string {
+	return []string{
+		"counterMap",
+	}
+}
+
+func (m *IntegralStatCounterMap) beforeSave() {}
+
+// +checklocksignore
+func (m *IntegralStatCounterMap) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.counterMap)
+}
+
+func (m *IntegralStatCounterMap) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *IntegralStatCounterMap) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.counterMap)
+}
+
+func (m *MultiIntegralStatCounterMap) StateTypeName() string {
+	return "pkg/tcpip.MultiIntegralStatCounterMap"
+}
+
+func (m *MultiIntegralStatCounterMap) StateFields() []string {
+	return []string{
+		"a",
+		"b",
+	}
+}
+
+func (m *MultiIntegralStatCounterMap) beforeSave() {}
+
+// +checklocksignore
+func (m *MultiIntegralStatCounterMap) StateSave(stateSinkObject state.Sink) {
+	m.beforeSave()
+	stateSinkObject.Save(0, &m.a)
+	stateSinkObject.Save(1, &m.b)
+}
+
+func (m *MultiIntegralStatCounterMap) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (m *MultiIntegralStatCounterMap) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &m.a)
+	stateSourceObject.Load(1, &m.b)
+}
+
+func (s *NICStats) StateTypeName() string {
+	return "pkg/tcpip.NICStats"
+}
+
+func (s *NICStats) StateFields() []string {
+	return []string{
+		"UnknownL3ProtocolRcvdPacketCounts",
+		"UnknownL4ProtocolRcvdPacketCounts",
+		"MalformedL4RcvdPackets",
+		"Tx",
+		"TxPacketsDroppedNoBufferSpace",
+		"Rx",
+		"DisabledRx",
+		"Neighbor",
+	}
+}
+
+func (s *NICStats) beforeSave() {}
+
+// +checklocksignore
+func (s *NICStats) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.UnknownL3ProtocolRcvdPacketCounts)
+	stateSinkObject.Save(1, &s.UnknownL4ProtocolRcvdPacketCounts)
+	stateSinkObject.Save(2, &s.MalformedL4RcvdPackets)
+	stateSinkObject.Save(3, &s.Tx)
+	stateSinkObject.Save(4, &s.TxPacketsDroppedNoBufferSpace)
+	stateSinkObject.Save(5, &s.Rx)
+	stateSinkObject.Save(6, &s.DisabledRx)
+	stateSinkObject.Save(7, &s.Neighbor)
+}
+
+func (s *NICStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *NICStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.UnknownL3ProtocolRcvdPacketCounts)
+	stateSourceObject.Load(1, &s.UnknownL4ProtocolRcvdPacketCounts)
+	stateSourceObject.Load(2, &s.MalformedL4RcvdPackets)
+	stateSourceObject.Load(3, &s.Tx)
+	stateSourceObject.Load(4, &s.TxPacketsDroppedNoBufferSpace)
+	stateSourceObject.Load(5, &s.Rx)
+	stateSourceObject.Load(6, &s.DisabledRx)
+	stateSourceObject.Load(7, &s.Neighbor)
+}
+
+func (s *Stats) StateTypeName() string {
+	return "pkg/tcpip.Stats"
+}
+
+func (s *Stats) StateFields() []string {
+	return []string{
+		"DroppedPackets",
+		"NICs",
+		"ICMP",
+		"IGMP",
+		"IP",
+		"ARP",
+		"TCP",
+		"UDP",
+	}
+}
+
+func (s *Stats) beforeSave() {}
+
+// +checklocksignore
+func (s *Stats) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.DroppedPackets)
+	stateSinkObject.Save(1, &s.NICs)
+	stateSinkObject.Save(2, &s.ICMP)
+	stateSinkObject.Save(3, &s.IGMP)
+	stateSinkObject.Save(4, &s.IP)
+	stateSinkObject.Save(5, &s.ARP)
+	stateSinkObject.Save(6, &s.TCP)
+	stateSinkObject.Save(7, &s.UDP)
+}
+
+func (s *Stats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *Stats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.DroppedPackets)
+	stateSourceObject.Load(1, &s.NICs)
+	stateSourceObject.Load(2, &s.ICMP)
+	stateSourceObject.Load(3, &s.IGMP)
+	stateSourceObject.Load(4, &s.IP)
+	stateSourceObject.Load(5, &s.ARP)
+	stateSourceObject.Load(6, &s.TCP)
+	stateSourceObject.Load(7, &s.UDP)
+}
+
+func (r *ReceiveErrors) StateTypeName() string {
+	return "pkg/tcpip.ReceiveErrors"
+}
+
+func (r *ReceiveErrors) StateFields() []string {
+	return []string{
+		"ReceiveBufferOverflow",
+		"MalformedPacketsReceived",
+		"ClosedReceiver",
+		"ChecksumErrors",
+	}
+}
+
+func (r *ReceiveErrors) beforeSave() {}
+
+// +checklocksignore
+func (r *ReceiveErrors) StateSave(stateSinkObject state.Sink) {
+	r.beforeSave()
+	stateSinkObject.Save(0, &r.ReceiveBufferOverflow)
+	stateSinkObject.Save(1, &r.MalformedPacketsReceived)
+	stateSinkObject.Save(2, &r.ClosedReceiver)
+	stateSinkObject.Save(3, &r.ChecksumErrors)
+}
+
+func (r *ReceiveErrors) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (r *ReceiveErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &r.ReceiveBufferOverflow)
+	stateSourceObject.Load(1, &r.MalformedPacketsReceived)
+	stateSourceObject.Load(2, &r.ClosedReceiver)
+	stateSourceObject.Load(3, &r.ChecksumErrors)
+}
+
+func (s *SendErrors) StateTypeName() string {
+	return "pkg/tcpip.SendErrors"
+}
+
+func (s *SendErrors) StateFields() []string {
+	return []string{
+		"SendToNetworkFailed",
+		"NoRoute",
+	}
+}
+
+func (s *SendErrors) beforeSave() {}
+
+// +checklocksignore
+func (s *SendErrors) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.SendToNetworkFailed)
+	stateSinkObject.Save(1, &s.NoRoute)
+}
+
+func (s *SendErrors) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *SendErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.SendToNetworkFailed)
+	stateSourceObject.Load(1, &s.NoRoute)
+}
+
+func (r *ReadErrors) StateTypeName() string {
+	return "pkg/tcpip.ReadErrors"
+}
+
+func (r *ReadErrors) StateFields() []string {
+	return []string{
+		"ReadClosed",
+		"InvalidEndpointState",
+		"NotConnected",
+	}
+}
+
+func (r *ReadErrors) beforeSave() {}
+
+// +checklocksignore
+func (r *ReadErrors) StateSave(stateSinkObject state.Sink) {
+	r.beforeSave()
+	stateSinkObject.Save(0, &r.ReadClosed)
+	stateSinkObject.Save(1, &r.InvalidEndpointState)
+	stateSinkObject.Save(2, &r.NotConnected)
+}
+
+func (r *ReadErrors) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (r *ReadErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &r.ReadClosed)
+	stateSourceObject.Load(1, &r.InvalidEndpointState)
+	stateSourceObject.Load(2, &r.NotConnected)
+}
+
+func (w *WriteErrors) StateTypeName() string {
+	return "pkg/tcpip.WriteErrors"
+}
+
+func (w *WriteErrors) StateFields() []string {
+	return []string{
+		"WriteClosed",
+		"InvalidEndpointState",
+		"InvalidArgs",
+	}
+}
+
+func (w *WriteErrors) beforeSave() {}
+
+// +checklocksignore
+func (w *WriteErrors) StateSave(stateSinkObject state.Sink) {
+	w.beforeSave()
+	stateSinkObject.Save(0, &w.WriteClosed)
+	stateSinkObject.Save(1, &w.InvalidEndpointState)
+	stateSinkObject.Save(2, &w.InvalidArgs)
+}
+
+func (w *WriteErrors) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (w *WriteErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &w.WriteClosed)
+	stateSourceObject.Load(1, &w.InvalidEndpointState)
+	stateSourceObject.Load(2, &w.InvalidArgs)
+}
+
+func (src *TransportEndpointStats) StateTypeName() string {
+	return "pkg/tcpip.TransportEndpointStats"
+}
+
+func (src *TransportEndpointStats) StateFields() []string {
+	return []string{
+		"PacketsReceived",
+		"PacketsSent",
+		"ReceiveErrors",
+		"ReadErrors",
+		"SendErrors",
+		"WriteErrors",
+	}
+}
+
+func (src *TransportEndpointStats) beforeSave() {}
+
+// +checklocksignore
+func (src *TransportEndpointStats) StateSave(stateSinkObject state.Sink) {
+	src.beforeSave()
+	stateSinkObject.Save(0, &src.PacketsReceived)
+	stateSinkObject.Save(1, &src.PacketsSent)
+	stateSinkObject.Save(2, &src.ReceiveErrors)
+	stateSinkObject.Save(3, &src.ReadErrors)
+	stateSinkObject.Save(4, &src.SendErrors)
+	stateSinkObject.Save(5, &src.WriteErrors)
+}
+
+func (src *TransportEndpointStats) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (src *TransportEndpointStats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &src.PacketsReceived)
+	stateSourceObject.Load(1, &src.PacketsSent)
+	stateSourceObject.Load(2, &src.ReceiveErrors)
+	stateSourceObject.Load(3, &src.ReadErrors)
+	stateSourceObject.Load(4, &src.SendErrors)
+	stateSourceObject.Load(5, &src.WriteErrors)
+}
+
+func (a *AddressWithPrefix) StateTypeName() string {
+	return "pkg/tcpip.AddressWithPrefix"
+}
+
+func (a *AddressWithPrefix) StateFields() []string {
+	return []string{
+		"Address",
+		"PrefixLen",
+	}
+}
+
+func (a *AddressWithPrefix) beforeSave() {}
+
+// +checklocksignore
+func (a *AddressWithPrefix) StateSave(stateSinkObject state.Sink) {
+	a.beforeSave()
+	stateSinkObject.Save(0, &a.Address)
+	stateSinkObject.Save(1, &a.PrefixLen)
+}
+
+func (a *AddressWithPrefix) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (a *AddressWithPrefix) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &a.Address)
+	stateSourceObject.Load(1, &a.PrefixLen)
+}
+
+func (p *ProtocolAddress) StateTypeName() string {
+	return "pkg/tcpip.ProtocolAddress"
+}
+
+func (p *ProtocolAddress) StateFields() []string {
+	return []string{
+		"Protocol",
+		"AddressWithPrefix",
+	}
+}
+
+func (p *ProtocolAddress) beforeSave() {}
+
+// +checklocksignore
+func (p *ProtocolAddress) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.Protocol)
+	stateSinkObject.Save(1, &p.AddressWithPrefix)
+}
+
+func (p *ProtocolAddress) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *ProtocolAddress) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.Protocol)
+	stateSourceObject.Load(1, &p.AddressWithPrefix)
+}
+
+func (j *jobInstance) StateTypeName() string {
+	return "pkg/tcpip.jobInstance"
+}
+
+func (j *jobInstance) StateFields() []string {
+	return []string{
+		"timer",
+		"earlyReturn",
+	}
+}
+
+func (j *jobInstance) beforeSave() {}
+
+// +checklocksignore
+func (j *jobInstance) StateSave(stateSinkObject state.Sink) {
+	j.beforeSave()
+	stateSinkObject.Save(0, &j.timer)
+	stateSinkObject.Save(1, &j.earlyReturn)
+}
+
+func (j *jobInstance) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (j *jobInstance) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &j.timer)
+	stateSourceObject.Load(1, &j.earlyReturn)
+}
+
+func (j *Job) StateTypeName() string {
+	return "pkg/tcpip.Job"
+}
+
+func (j *Job) StateFields() []string {
+	return []string{
+		"clock",
+		"instance",
+	}
+}
+
+func (j *Job) beforeSave() {}
+
+// +checklocksignore
+func (j *Job) StateSave(stateSinkObject state.Sink) {
+	j.beforeSave()
+	stateSinkObject.Save(0, &j.clock)
+	stateSinkObject.Save(1, &j.instance)
+}
+
+func (j *Job) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (j *Job) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &j.clock)
+	stateSourceObject.Load(1, &j.instance)
 }
 
 func init() {
@@ -1770,6 +3204,7 @@ func init() {
 	state.Register((*ErrDestinationRequired)(nil))
 	state.Register((*ErrDuplicateAddress)(nil))
 	state.Register((*ErrDuplicateNICID)(nil))
+	state.Register((*ErrInvalidNICID)(nil))
 	state.Register((*ErrInvalidEndpointState)(nil))
 	state.Register((*ErrInvalidOptionValue)(nil))
 	state.Register((*ErrInvalidPortRange)(nil))
@@ -1795,28 +3230,65 @@ func init() {
 	state.Register((*ErrWouldBlock)(nil))
 	state.Register((*ErrMissingRequiredFields)(nil))
 	state.Register((*ErrMulticastInputCannotBeOutput)(nil))
+	state.Register((*RouteList)(nil))
+	state.Register((*RouteEntry)(nil))
 	state.Register((*sockErrorList)(nil))
 	state.Register((*sockErrorEntry)(nil))
 	state.Register((*SocketOptions)(nil))
 	state.Register((*LocalSockError)(nil))
 	state.Register((*SockError)(nil))
 	state.Register((*stdClock)(nil))
+	state.Register((*stdTimer)(nil))
 	state.Register((*MonotonicTime)(nil))
 	state.Register((*Address)(nil))
 	state.Register((*AddressMask)(nil))
+	state.Register((*Subnet)(nil))
 	state.Register((*FullAddress)(nil))
 	state.Register((*SendableControlMessages)(nil))
 	state.Register((*ReceivableControlMessages)(nil))
 	state.Register((*LinkPacketInfo)(nil))
+	state.Register((*TCPSendBufferSizeRangeOption)(nil))
+	state.Register((*TCPReceiveBufferSizeRangeOption)(nil))
 	state.Register((*ICMPv6Filter)(nil))
 	state.Register((*LingerOption)(nil))
 	state.Register((*IPPacketInfo)(nil))
 	state.Register((*IPv6PacketInfo)(nil))
+	state.Register((*SendBufferSizeOption)(nil))
+	state.Register((*ReceiveBufferSizeOption)(nil))
+	state.Register((*Route)(nil))
 	state.Register((*StatCounter)(nil))
+	state.Register((*MultiCounterStat)(nil))
+	state.Register((*ICMPv4PacketStats)(nil))
+	state.Register((*ICMPv4SentPacketStats)(nil))
+	state.Register((*ICMPv4ReceivedPacketStats)(nil))
+	state.Register((*ICMPv4Stats)(nil))
+	state.Register((*ICMPv6PacketStats)(nil))
+	state.Register((*ICMPv6SentPacketStats)(nil))
+	state.Register((*ICMPv6ReceivedPacketStats)(nil))
+	state.Register((*ICMPv6Stats)(nil))
+	state.Register((*ICMPStats)(nil))
+	state.Register((*IGMPPacketStats)(nil))
+	state.Register((*IGMPSentPacketStats)(nil))
+	state.Register((*IGMPReceivedPacketStats)(nil))
+	state.Register((*IGMPStats)(nil))
+	state.Register((*IPForwardingStats)(nil))
+	state.Register((*IPStats)(nil))
+	state.Register((*ARPStats)(nil))
+	state.Register((*TCPStats)(nil))
+	state.Register((*UDPStats)(nil))
+	state.Register((*NICNeighborStats)(nil))
+	state.Register((*NICPacketStats)(nil))
+	state.Register((*IntegralStatCounterMap)(nil))
+	state.Register((*MultiIntegralStatCounterMap)(nil))
+	state.Register((*NICStats)(nil))
+	state.Register((*Stats)(nil))
 	state.Register((*ReceiveErrors)(nil))
 	state.Register((*SendErrors)(nil))
 	state.Register((*ReadErrors)(nil))
 	state.Register((*WriteErrors)(nil))
 	state.Register((*TransportEndpointStats)(nil))
 	state.Register((*AddressWithPrefix)(nil))
+	state.Register((*ProtocolAddress)(nil))
+	state.Register((*jobInstance)(nil))
+	state.Register((*Job)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/timer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/timer.go
index b80b3458..28bc2897 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/timer.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/timer.go
@@ -56,6 +56,8 @@ import (
 //
 // To address the above concerns the simplest solution was to give each timer
 // its own earlyReturn signal.
+//
+// +stateify savable
 type jobInstance struct {
 	timer Timer
 
@@ -93,6 +95,8 @@ func (j *jobInstance) stop() {
 //
 // Note, it is not safe to copy a Job as its timer instance creates
 // a closure over the address of the Job.
+//
+// +stateify savable
 type Job struct {
 	_ sync.NoCopy
 
@@ -106,7 +110,7 @@ type Job struct {
 	// be held when attempting to stop the timer.
 	//
 	// Must never change after being assigned.
-	locker sync.Locker
+	locker sync.Locker `state:"nosave"`
 
 	// fn is the function that will be called when a timer fires and has not been
 	// signaled to early return.
@@ -114,7 +118,8 @@ type Job struct {
 	// fn MUST NOT attempt to lock locker.
 	//
 	// Must never change after being assigned.
-	fn func()
+	// TODO(b/341946753): Restore when netstack is savable.
+	fn func() `state:"nosave"`
 }
 
 // Cancel prevents the Job from executing if it has not executed already.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint.go
index 3bebf8d5..988604fc 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint.go
@@ -36,7 +36,7 @@ type icmpPacket struct {
 	icmpPacketEntry
 	senderAddress tcpip.FullAddress
 	packetInfo    tcpip.IPPacketInfo
-	data          stack.PacketBufferPtr
+	data          *stack.PacketBuffer
 	receivedAt    time.Time `state:".(int64)"`
 
 	// tosOrTClass stores either the Type of Service for IPv4 or the Traffic Class
@@ -60,7 +60,6 @@ type endpoint struct {
 	stack       *stack.Stack `state:"manual"`
 	transProto  tcpip.TransportProtocolNumber
 	waiterQueue *waiter.Queue
-	uniqueID    uint64
 	net         network.Endpoint
 	stats       tcpip.TransportEndpointStats
 	ops         tcpip.SocketOptions
@@ -86,7 +85,6 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, transProt
 		stack:       s,
 		transProto:  transProto,
 		waiterQueue: waiterQueue,
-		uniqueID:    s.UniqueID(),
 	}
 	ep.ops.InitHandler(ep, ep.stack, tcpip.GetStackSendBufferLimits, tcpip.GetStackReceiveBufferLimits)
 	ep.ops.SetSendBufferSize(32*1024, false /* notify */)
@@ -110,11 +108,6 @@ func (e *endpoint) WakeupWriters() {
 	e.net.MaybeSignalWritable()
 }
 
-// UniqueID implements stack.TransportEndpoint.UniqueID.
-func (e *endpoint) UniqueID() uint64 {
-	return e.uniqueID
-}
-
 // Abort implements stack.TransportEndpoint.Abort.
 func (e *endpoint) Abort() {
 	e.Close()
@@ -412,7 +405,7 @@ func send4(s *stack.Stack, ctx *network.WriteContext, ident uint16, data *buffer
 	}
 
 	pkt := ctx.TryNewPacketBuffer(header.ICMPv4MinimumSize+int(maxHeaderLength), buffer.Buffer{})
-	if pkt.IsNil() {
+	if pkt == nil {
 		return &tcpip.ErrWouldBlock{}
 	}
 	defer pkt.DecRef()
@@ -454,7 +447,7 @@ func send6(s *stack.Stack, ctx *network.WriteContext, ident uint16, data *buffer
 	}
 
 	pkt := ctx.TryNewPacketBuffer(header.ICMPv6MinimumSize+int(maxHeaderLength), buffer.Buffer{})
-	if pkt.IsNil() {
+	if pkt == nil {
 		return &tcpip.ErrWouldBlock{}
 	}
 	defer pkt.DecRef()
@@ -584,7 +577,7 @@ func (e *endpoint) registerWithStack(netProto tcpip.NetworkProtocolNumber, id st
 	}
 
 	// We need to find a port for the endpoint.
-	_, err := e.stack.PickEphemeralPort(e.stack.Rand(), func(p uint16) (bool, tcpip.Error) {
+	_, err := e.stack.PickEphemeralPort(e.stack.SecureRNG(), func(p uint16) (bool, tcpip.Error) {
 		id.LocalPort = p
 		err := e.stack.RegisterTransportEndpoint([]tcpip.NetworkProtocolNumber{netProto}, e.transProto, id, e, ports.Flags{}, bindToDevice)
 		switch err.(type) {
@@ -696,7 +689,7 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {
 
 // HandlePacket is called by the stack when new packets arrive to this transport
 // endpoint.
-func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) {
+func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) {
 	// Only accept echo replies.
 	switch e.net.NetProto() {
 	case header.IPv4ProtocolNumber:
@@ -784,7 +777,7 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketBu
 }
 
 // HandleError implements stack.TransportEndpoint.
-func (*endpoint) HandleError(stack.TransportError, stack.PacketBufferPtr) {}
+func (*endpoint) HandleError(stack.TransportError, *stack.PacketBuffer) {}
 
 // State implements tcpip.Endpoint.State. The ICMP endpoint currently doesn't
 // expose internal socket state.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint_state.go
index 54752dd2..134797e8 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint_state.go
@@ -15,6 +15,7 @@
 package icmp
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -29,22 +30,23 @@ func (p *icmpPacket) saveReceivedAt() int64 {
 }
 
 // loadReceivedAt is invoked by stateify.
-func (p *icmpPacket) loadReceivedAt(nsec int64) {
+func (p *icmpPacket) loadReceivedAt(_ context.Context, nsec int64) {
 	p.receivedAt = time.Unix(0, nsec)
 }
 
 // afterLoad is invoked by stateify.
-func (e *endpoint) afterLoad() {
-	stack.StackFromEnv.RegisterRestoredEndpoint(e)
+func (e *endpoint) afterLoad(ctx context.Context) {
+	stack.RestoreStackFromContext(ctx).RegisterRestoredEndpoint(e)
 }
 
 // beforeSave is invoked by stateify.
 func (e *endpoint) beforeSave() {
 	e.freeze()
+	e.stack.RegisterResumableEndpoint(e)
 }
 
-// Resume implements tcpip.ResumableEndpoint.Resume.
-func (e *endpoint) Resume(s *stack.Stack) {
+// Restore implements tcpip.RestoredEndpoint.Restore.
+func (e *endpoint) Restore(s *stack.Stack) {
 	e.thaw()
 
 	e.net.Resume(s)
@@ -67,3 +69,8 @@ func (e *endpoint) Resume(s *stack.Stack) {
 		panic(fmt.Sprintf("unhandled state = %s", state))
 	}
 }
+
+// Resume implements tcpip.ResumableEndpoint.Resume.
+func (e *endpoint) Resume() {
+	e.thaw()
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/icmp_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/icmp_state_autogen.go
index 4ebbd871..ee6a8c2e 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/icmp_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/icmp_state_autogen.go
@@ -3,6 +3,8 @@
 package icmp
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -38,17 +40,17 @@ func (p *icmpPacket) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(6, &p.ttlOrHopLimit)
 }
 
-func (p *icmpPacket) afterLoad() {}
+func (p *icmpPacket) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (p *icmpPacket) StateLoad(stateSourceObject state.Source) {
+func (p *icmpPacket) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &p.icmpPacketEntry)
 	stateSourceObject.Load(1, &p.senderAddress)
 	stateSourceObject.Load(2, &p.packetInfo)
 	stateSourceObject.Load(3, &p.data)
 	stateSourceObject.Load(5, &p.tosOrTClass)
 	stateSourceObject.Load(6, &p.ttlOrHopLimit)
-	stateSourceObject.LoadValue(4, new(int64), func(y any) { p.loadReceivedAt(y.(int64)) })
+	stateSourceObject.LoadValue(4, new(int64), func(y any) { p.loadReceivedAt(ctx, y.(int64)) })
 }
 
 func (e *endpoint) StateTypeName() string {
@@ -60,7 +62,6 @@ func (e *endpoint) StateFields() []string {
 		"DefaultSocketOptionsHandler",
 		"transProto",
 		"waiterQueue",
-		"uniqueID",
 		"net",
 		"stats",
 		"ops",
@@ -79,34 +80,32 @@ func (e *endpoint) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &e.DefaultSocketOptionsHandler)
 	stateSinkObject.Save(1, &e.transProto)
 	stateSinkObject.Save(2, &e.waiterQueue)
-	stateSinkObject.Save(3, &e.uniqueID)
-	stateSinkObject.Save(4, &e.net)
-	stateSinkObject.Save(5, &e.stats)
-	stateSinkObject.Save(6, &e.ops)
-	stateSinkObject.Save(7, &e.rcvReady)
-	stateSinkObject.Save(8, &e.rcvList)
-	stateSinkObject.Save(9, &e.rcvBufSize)
-	stateSinkObject.Save(10, &e.rcvClosed)
-	stateSinkObject.Save(11, &e.frozen)
-	stateSinkObject.Save(12, &e.ident)
+	stateSinkObject.Save(3, &e.net)
+	stateSinkObject.Save(4, &e.stats)
+	stateSinkObject.Save(5, &e.ops)
+	stateSinkObject.Save(6, &e.rcvReady)
+	stateSinkObject.Save(7, &e.rcvList)
+	stateSinkObject.Save(8, &e.rcvBufSize)
+	stateSinkObject.Save(9, &e.rcvClosed)
+	stateSinkObject.Save(10, &e.frozen)
+	stateSinkObject.Save(11, &e.ident)
 }
 
 // +checklocksignore
-func (e *endpoint) StateLoad(stateSourceObject state.Source) {
+func (e *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.DefaultSocketOptionsHandler)
 	stateSourceObject.Load(1, &e.transProto)
 	stateSourceObject.Load(2, &e.waiterQueue)
-	stateSourceObject.Load(3, &e.uniqueID)
-	stateSourceObject.Load(4, &e.net)
-	stateSourceObject.Load(5, &e.stats)
-	stateSourceObject.Load(6, &e.ops)
-	stateSourceObject.Load(7, &e.rcvReady)
-	stateSourceObject.Load(8, &e.rcvList)
-	stateSourceObject.Load(9, &e.rcvBufSize)
-	stateSourceObject.Load(10, &e.rcvClosed)
-	stateSourceObject.Load(11, &e.frozen)
-	stateSourceObject.Load(12, &e.ident)
-	stateSourceObject.AfterLoad(e.afterLoad)
+	stateSourceObject.Load(3, &e.net)
+	stateSourceObject.Load(4, &e.stats)
+	stateSourceObject.Load(5, &e.ops)
+	stateSourceObject.Load(6, &e.rcvReady)
+	stateSourceObject.Load(7, &e.rcvList)
+	stateSourceObject.Load(8, &e.rcvBufSize)
+	stateSourceObject.Load(9, &e.rcvClosed)
+	stateSourceObject.Load(10, &e.frozen)
+	stateSourceObject.Load(11, &e.ident)
+	stateSourceObject.AfterLoad(func() { e.afterLoad(ctx) })
 }
 
 func (l *icmpPacketList) StateTypeName() string {
@@ -129,10 +128,10 @@ func (l *icmpPacketList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *icmpPacketList) afterLoad() {}
+func (l *icmpPacketList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *icmpPacketList) StateLoad(stateSourceObject state.Source) {
+func (l *icmpPacketList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
@@ -157,17 +156,46 @@ func (e *icmpPacketEntry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *icmpPacketEntry) afterLoad() {}
+func (e *icmpPacketEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *icmpPacketEntry) StateLoad(stateSourceObject state.Source) {
+func (e *icmpPacketEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
 
+func (p *protocol) StateTypeName() string {
+	return "pkg/tcpip/transport/icmp.protocol"
+}
+
+func (p *protocol) StateFields() []string {
+	return []string{
+		"stack",
+		"number",
+	}
+}
+
+func (p *protocol) beforeSave() {}
+
+// +checklocksignore
+func (p *protocol) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.stack)
+	stateSinkObject.Save(1, &p.number)
+}
+
+func (p *protocol) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *protocol) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.stack)
+	stateSourceObject.Load(1, &p.number)
+}
+
 func init() {
 	state.Register((*icmpPacket)(nil))
 	state.Register((*endpoint)(nil))
 	state.Register((*icmpPacketList)(nil))
 	state.Register((*icmpPacketEntry)(nil))
+	state.Register((*protocol)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/protocol.go
index d9833e47..8bca0fa5 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/protocol.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/protocol.go
@@ -35,6 +35,8 @@ const (
 )
 
 // protocol implements stack.TransportProtocol.
+//
+// +stateify savable
 type protocol struct {
 	stack *stack.Stack
 
@@ -100,7 +102,7 @@ func (p *protocol) ParsePorts(v []byte) (src, dst uint16, err tcpip.Error) {
 
 // HandleUnknownDestinationPacket handles packets targeted at this protocol but
 // that don't match any existing endpoint.
-func (*protocol) HandleUnknownDestinationPacket(stack.TransportEndpointID, stack.PacketBufferPtr) stack.UnknownDestinationPacketDisposition {
+func (*protocol) HandleUnknownDestinationPacket(stack.TransportEndpointID, *stack.PacketBuffer) stack.UnknownDestinationPacketDisposition {
 	return stack.UnknownDestinationPacketHandled
 }
 
@@ -127,7 +129,7 @@ func (*protocol) Pause() {}
 func (*protocol) Resume() {}
 
 // Parse implements stack.TransportProtocol.Parse.
-func (*protocol) Parse(pkt stack.PacketBufferPtr) bool {
+func (*protocol) Parse(pkt *stack.PacketBuffer) bool {
 	// Right now, the Parse() method is tied to enabled protocols passed into
 	// stack.New. This works for UDP and TCP, but we handle ICMP traffic even
 	// when netstack users don't pass ICMP as a supported protocol.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go
index 880099f1..9b77ae36 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go
@@ -265,7 +265,7 @@ func (c *WriteContext) PacketInfo() WritePacketInfo {
 //
 // If this method returns nil, the caller should wait for the endpoint to become
 // writable.
-func (c *WriteContext) TryNewPacketBuffer(reserveHdrBytes int, data buffer.Buffer) stack.PacketBufferPtr {
+func (c *WriteContext) TryNewPacketBuffer(reserveHdrBytes int, data buffer.Buffer) *stack.PacketBuffer {
 	e := c.e
 
 	e.sendBufferSizeInUseMu.Lock()
@@ -274,7 +274,34 @@ func (c *WriteContext) TryNewPacketBuffer(reserveHdrBytes int, data buffer.Buffe
 	if !e.hasSendSpaceRLocked() {
 		return nil
 	}
+	return c.newPacketBufferLocked(reserveHdrBytes, data)
+}
+
+// TryNewPacketBufferFromPayloader returns a new packet buffer iff the endpoint's send buffer
+// is not full. Otherwise, data from `payloader` isn't read.
+//
+// If this method returns nil, the caller should wait for the endpoint to become
+// writable.
+func (c *WriteContext) TryNewPacketBufferFromPayloader(reserveHdrBytes int, payloader tcpip.Payloader) *stack.PacketBuffer {
+	e := c.e
+
+	e.sendBufferSizeInUseMu.Lock()
+	defer e.sendBufferSizeInUseMu.Unlock()
 
+	if !e.hasSendSpaceRLocked() {
+		return nil
+	}
+	var data buffer.Buffer
+	if _, err := data.WriteFromReader(payloader, int64(payloader.Len())); err != nil {
+		data.Release()
+		return nil
+	}
+	return c.newPacketBufferLocked(reserveHdrBytes, data)
+}
+
+// +checklocks:c.e.sendBufferSizeInUseMu
+func (c *WriteContext) newPacketBufferLocked(reserveHdrBytes int, data buffer.Buffer) *stack.PacketBuffer {
+	e := c.e
 	// Note that we allow oversubscription - if there is any space at all in the
 	// send buffer, we accept the full packet which may be larger than the space
 	// available. This is because if the endpoint reports that it is writable,
@@ -308,7 +335,7 @@ func (c *WriteContext) TryNewPacketBuffer(reserveHdrBytes int, data buffer.Buffe
 }
 
 // WritePacket attempts to write the packet.
-func (c *WriteContext) WritePacket(pkt stack.PacketBufferPtr, headerIncluded bool) tcpip.Error {
+func (c *WriteContext) WritePacket(pkt *stack.PacketBuffer, headerIncluded bool) tcpip.Error {
 	c.e.mu.RLock()
 	pkt.Owner = c.e.owner
 	c.e.mu.RUnlock()
@@ -411,7 +438,7 @@ func (e *Endpoint) AcquireContextForWrite(opts tcpip.WriteOptions) (WriteContext
 		// interface/address used to send the packet so we need to construct
 		// a new route instead of using the connected route.
 		//
-		// Contruct a destination matching the remote the endpoint is connected
+		// Construct a destination matching the remote the endpoint is connected
 		// to.
 		to = &tcpip.FullAddress{
 			// RegisterNICID is set when the endpoint is connected. It is usually
@@ -489,7 +516,7 @@ func (e *Endpoint) AcquireContextForWrite(opts tcpip.WriteOptions) (WriteContext
 			}
 		}
 
-		dst, netProto, err := e.checkV4Mapped(*to)
+		dst, netProto, err := e.checkV4Mapped(*to, false /* bind */)
 		if err != nil {
 			return WriteContext{}, err
 		}
@@ -629,7 +656,7 @@ func (e *Endpoint) ConnectAndThen(addr tcpip.FullAddress, f func(netProto tcpip.
 		return &tcpip.ErrInvalidEndpointState{}
 	}
 
-	addr, netProto, err := e.checkV4Mapped(addr)
+	addr, netProto, err := e.checkV4Mapped(addr, false /* bind */)
 	if err != nil {
 		return err
 	}
@@ -683,9 +710,9 @@ func (e *Endpoint) Shutdown() tcpip.Error {
 
 // checkV4MappedRLocked determines the effective network protocol and converts
 // addr to its canonical form.
-func (e *Endpoint) checkV4Mapped(addr tcpip.FullAddress) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) {
+func (e *Endpoint) checkV4Mapped(addr tcpip.FullAddress, bind bool) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) {
 	info := e.Info()
-	unwrapped, netProto, err := info.AddrNetProtoLocked(addr, e.ops.GetV6Only())
+	unwrapped, netProto, err := info.AddrNetProtoLocked(addr, e.ops.GetV6Only(), bind)
 	if err != nil {
 		return tcpip.FullAddress{}, 0, err
 	}
@@ -720,7 +747,7 @@ func (e *Endpoint) BindAndThen(addr tcpip.FullAddress, f func(tcpip.NetworkProto
 		return &tcpip.ErrInvalidEndpointState{}
 	}
 
-	addr, netProto, err := e.checkV4Mapped(addr)
+	addr, netProto, err := e.checkV4Mapped(addr, true /* bind */)
 	if err != nil {
 		return err
 	}
@@ -797,7 +824,7 @@ func (e *Endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error {
 	case tcpip.MTUDiscoverOption:
 		// Return not supported if the value is not disabling path
 		// MTU discovery.
-		if v != tcpip.PMTUDiscoveryDont {
+		if tcpip.PMTUDStrategy(v) != tcpip.PMTUDiscoveryDont {
 			return &tcpip.ErrNotSupported{}
 		}
 
@@ -835,7 +862,7 @@ func (e *Endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {
 	switch opt {
 	case tcpip.MTUDiscoverOption:
 		// The only supported setting is path MTU discovery disabled.
-		return tcpip.PMTUDiscoveryDont, nil
+		return int(tcpip.PMTUDiscoveryDont), nil
 
 	case tcpip.MulticastTTLOption:
 		e.mu.Lock()
@@ -880,7 +907,7 @@ func (e *Endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error {
 		defer e.mu.Unlock()
 
 		fa := tcpip.FullAddress{Addr: v.InterfaceAddr}
-		fa, netProto, err := e.checkV4Mapped(fa)
+		fa, netProto, err := e.checkV4Mapped(fa, true /* bind */)
 		if err != nil {
 			return err
 		}
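Reviewer note: TryNewPacketBufferFromPayloader, added above, defers reading from the Payloader until the send-buffer check has passed, so a caller that gets nil back can retry later without having lost its payload. A standard-library-only sketch of that "check capacity before consuming the reader" pattern; sendBuffer, payloader, and errWouldBlock below are illustrative stand-ins, not gvisor types:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// payloader matches the shape the new helper relies on: an io.Reader that
// also reports how many bytes are pending (bytes.Reader satisfies it).
type payloader interface {
	io.Reader
	Len() int
}

type sendBuffer struct {
	used, limit int
	packets     [][]byte
}

var errWouldBlock = errors.New("would block")

// tryEnqueue checks for space before touching the payloader, so a caller that
// gets errWouldBlock still holds its payload and can simply retry later. Like
// the code above, it oversubscribes: any free space admits the whole packet.
func (b *sendBuffer) tryEnqueue(p payloader) error {
	if b.used >= b.limit {
		return errWouldBlock // nothing has been read from p
	}
	data, err := io.ReadAll(p)
	if err != nil {
		return err
	}
	b.used += len(data)
	b.packets = append(b.packets, data)
	return nil
}

func main() {
	b := &sendBuffer{limit: 4096}
	err := b.tryEnqueue(bytes.NewReader([]byte("ping")))
	fmt.Println(err, b.used) // <nil> 4
}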
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/network_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/network_state_autogen.go
index 053e1d5d..f3e38fc8 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/network_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/network_state_autogen.go
@@ -3,6 +3,8 @@
 package network
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -58,10 +60,10 @@ func (e *Endpoint) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(17, &e.state)
 }
 
-func (e *Endpoint) afterLoad() {}
+func (e *Endpoint) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *Endpoint) StateLoad(stateSourceObject state.Source) {
+func (e *Endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.ops)
 	stateSourceObject.Load(1, &e.netProto)
 	stateSourceObject.Load(2, &e.transProto)
@@ -102,10 +104,10 @@ func (m *multicastMembership) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &m.multicastAddr)
 }
 
-func (m *multicastMembership) afterLoad() {}
+func (m *multicastMembership) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (m *multicastMembership) StateLoad(stateSourceObject state.Source) {
+func (m *multicastMembership) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &m.nicID)
 	stateSourceObject.Load(1, &m.multicastAddr)
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/endpoint.go
index 3e9c4c4b..be2adae1 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/endpoint.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/endpoint.go
@@ -137,7 +137,7 @@ func (*endpoint) GetSockOptInt(tcpip.SockOptInt) (int, tcpip.Error) {
 }
 
 // HandlePacket implements stack.RawTransportEndpoint.HandlePacket.
-func (*endpoint) HandlePacket(pkt stack.PacketBufferPtr) {
+func (*endpoint) HandlePacket(pkt *stack.PacketBuffer) {
 	panic(fmt.Sprintf("unreachable: noop.endpoint should never be registered, but got packet: %+v", pkt))
 }
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/noop_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/noop_state_autogen.go
index 27d6064a..ac5a8611 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/noop_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/noop_state_autogen.go
@@ -3,6 +3,8 @@
 package noop
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -26,10 +28,10 @@ func (ep *endpoint) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &ep.ops)
 }
 
-func (ep *endpoint) afterLoad() {}
+func (ep *endpoint) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (ep *endpoint) StateLoad(stateSourceObject state.Source) {
+func (ep *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &ep.DefaultSocketOptionsHandler)
 	stateSourceObject.Load(1, &ep.ops)
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go
index 46a7f82a..9166bca6 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go
@@ -40,7 +40,7 @@ import (
 type packet struct {
 	packetEntry
 	// data holds the actual packet data, including any headers and payload.
-	data       stack.PacketBufferPtr
+	data       *stack.PacketBuffer
 	receivedAt time.Time `state:".(int64)"`
 	// senderAddr is the network address of the sender.
 	senderAddr tcpip.FullAddress
@@ -94,7 +94,7 @@ type endpoint struct {
 }
 
 // NewEndpoint returns a new packet endpoint.
-func NewEndpoint(s *stack.Stack, cooked bool, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error) {
+func NewEndpoint(s *stack.Stack, cooked bool, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) tcpip.Endpoint {
 	ep := &endpoint{
 		stack:         s,
 		cooked:        cooked,
@@ -115,10 +115,9 @@ func NewEndpoint(s *stack.Stack, cooked bool, netProto tcpip.NetworkProtocolNumb
 		ep.ops.SetReceiveBufferSize(int64(rs.Default), false /* notify */)
 	}
 
-	if err := s.RegisterPacketEndpoint(0, netProto, ep); err != nil {
-		return nil, err
-	}
-	return ep, nil
+	s.RegisterPacketEndpoint(0, netProto, ep)
+
+	return ep
 }
 
 // Abort implements stack.TransportEndpoint.Abort.
@@ -263,7 +262,7 @@ func (*endpoint) Disconnect() tcpip.Error {
 }
 
 // Connect implements tcpip.Endpoint.Connect. Packet sockets cannot be
-// connected, and this function always returnes *tcpip.ErrNotSupported.
+// connected, and this function always returns *tcpip.ErrNotSupported.
 func (*endpoint) Connect(tcpip.FullAddress) tcpip.Error {
 	return &tcpip.ErrNotSupported{}
 }
@@ -417,7 +416,7 @@ func (ep *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {
 }
 
 // HandlePacket implements stack.PacketEndpoint.HandlePacket.
-func (ep *endpoint) HandlePacket(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
+func (ep *endpoint) HandlePacket(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
 	ep.rcvMu.Lock()
 
 	// Drop the packet if our buffer is currently full.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint_state.go
index 74203fa8..16be7d6b 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint_state.go
@@ -15,6 +15,7 @@
 package packet
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -28,7 +29,7 @@ func (p *packet) saveReceivedAt() int64 {
 }
 
 // loadReceivedAt is invoked by stateify.
-func (p *packet) loadReceivedAt(nsec int64) {
+func (p *packet) loadReceivedAt(_ context.Context, nsec int64) {
 	p.receivedAt = time.Unix(0, nsec)
 }
 
@@ -37,14 +38,15 @@ func (ep *endpoint) beforeSave() {
 	ep.rcvMu.Lock()
 	defer ep.rcvMu.Unlock()
 	ep.rcvDisabled = true
+	ep.stack.RegisterResumableEndpoint(ep)
 }
 
 // afterLoad is invoked by stateify.
-func (ep *endpoint) afterLoad() {
+func (ep *endpoint) afterLoad(ctx context.Context) {
 	ep.mu.Lock()
 	defer ep.mu.Unlock()
 
-	ep.stack = stack.StackFromEnv
+	ep.stack = stack.RestoreStackFromContext(ctx)
 	ep.ops.InitHandler(ep, ep.stack, tcpip.GetStackSendBufferLimits, tcpip.GetStackReceiveBufferLimits)
 
 	if err := ep.stack.RegisterPacketEndpoint(ep.boundNIC, ep.boundNetProto, ep); err != nil {
@@ -55,3 +57,10 @@ func (ep *endpoint) afterLoad() {
 	ep.rcvDisabled = false
 	ep.rcvMu.Unlock()
 }
+
+// Resume implements tcpip.ResumableEndpoint.Resume.
+func (ep *endpoint) Resume() {
+	ep.rcvMu.Lock()
+	defer ep.rcvMu.Unlock()
+	ep.rcvDisabled = false
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/packet_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/packet_state_autogen.go
index 5e344c8d..7e2f7fda 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/packet_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/packet_state_autogen.go
@@ -3,6 +3,8 @@
 package packet
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -34,15 +36,15 @@ func (p *packet) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(4, &p.packetInfo)
 }
 
-func (p *packet) afterLoad() {}
+func (p *packet) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (p *packet) StateLoad(stateSourceObject state.Source) {
+func (p *packet) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &p.packetEntry)
 	stateSourceObject.Load(1, &p.data)
 	stateSourceObject.Load(3, &p.senderAddr)
 	stateSourceObject.Load(4, &p.packetInfo)
-	stateSourceObject.LoadValue(2, new(int64), func(y any) { p.loadReceivedAt(y.(int64)) })
+	stateSourceObject.LoadValue(2, new(int64), func(y any) { p.loadReceivedAt(ctx, y.(int64)) })
 }
 
 func (ep *endpoint) StateTypeName() string {
@@ -86,7 +88,7 @@ func (ep *endpoint) StateSave(stateSinkObject state.Sink) {
 }
 
 // +checklocksignore
-func (ep *endpoint) StateLoad(stateSourceObject state.Source) {
+func (ep *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &ep.DefaultSocketOptionsHandler)
 	stateSourceObject.Load(1, &ep.waiterQueue)
 	stateSourceObject.Load(2, &ep.cooked)
@@ -100,7 +102,7 @@ func (ep *endpoint) StateLoad(stateSourceObject state.Source) {
 	stateSourceObject.Load(10, &ep.boundNetProto)
 	stateSourceObject.Load(11, &ep.boundNIC)
 	stateSourceObject.Load(12, &ep.lastError)
-	stateSourceObject.AfterLoad(ep.afterLoad)
+	stateSourceObject.AfterLoad(func() { ep.afterLoad(ctx) })
 }
 
 func (l *packetList) StateTypeName() string {
@@ -123,10 +125,10 @@ func (l *packetList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *packetList) afterLoad() {}
+func (l *packetList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *packetList) StateLoad(stateSourceObject state.Source) {
+func (l *packetList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
@@ -151,10 +153,10 @@ func (e *packetEntry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *packetEntry) afterLoad() {}
+func (e *packetEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *packetEntry) StateLoad(stateSourceObject state.Source) {
+func (e *packetEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go
index 476932d2..1eaedc19 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go
@@ -46,7 +46,7 @@ type rawPacket struct {
 	rawPacketEntry
 	// data holds the actual packet data, including any headers and
 	// payload.
-	data       stack.PacketBufferPtr
+	data       *stack.PacketBuffer
 	receivedAt time.Time `state:".(int64)"`
 	// senderAddr is the network address of the sender.
 	senderAddr tcpip.FullAddress
@@ -377,7 +377,7 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp
 	}
 
 	pkt := ctx.TryNewPacketBuffer(int(ctx.PacketInfo().MaxHeaderLength), payload.Clone())
-	if pkt.IsNil() {
+	if pkt == nil {
 		return 0, &tcpip.ErrWouldBlock{}
 	}
 	defer pkt.DecRef()
@@ -586,7 +586,7 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {
 }
 
 // HandlePacket implements stack.RawTransportEndpoint.HandlePacket.
-func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) {
+func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {
 	notifyReadableEvents := func() bool {
 		e.mu.RLock()
 		defer e.mu.RUnlock()
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint_state.go
index 1bda0b8b..d915ade2 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint_state.go
@@ -15,6 +15,7 @@
 package raw
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -28,22 +29,23 @@ func (p *rawPacket) saveReceivedAt() int64 {
 }
 
 // loadReceivedAt is invoked by stateify.
-func (p *rawPacket) loadReceivedAt(nsec int64) {
+func (p *rawPacket) loadReceivedAt(_ context.Context, nsec int64) {
 	p.receivedAt = time.Unix(0, nsec)
 }
 
 // afterLoad is invoked by stateify.
-func (e *endpoint) afterLoad() {
-	stack.StackFromEnv.RegisterRestoredEndpoint(e)
+func (e *endpoint) afterLoad(ctx context.Context) {
+	stack.RestoreStackFromContext(ctx).RegisterRestoredEndpoint(e)
 }
 
 // beforeSave is invoked by stateify.
 func (e *endpoint) beforeSave() {
 	e.setReceiveDisabled(true)
+	e.stack.RegisterResumableEndpoint(e)
 }
 
-// Resume implements tcpip.ResumableEndpoint.Resume.
-func (e *endpoint) Resume(s *stack.Stack) {
+// Restore implements tcpip.RestoredEndpoint.Restore.
+func (e *endpoint) Restore(s *stack.Stack) {
 	e.net.Resume(s)
 
 	e.setReceiveDisabled(false)
@@ -57,3 +59,8 @@ func (e *endpoint) Resume(s *stack.Stack) {
 		}
 	}
 }
+
+// Resume implements tcpip.ResumableEndpoint.Resume.
+func (e *endpoint) Resume() {
+	e.setReceiveDisabled(false)
+}
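Reviewer note: here, as in the ICMP endpoint earlier, the single Resume(*stack.Stack) hook is split in two. Restore (tcpip.RestoredEndpoint) runs once after a state load and receives the freshly created stack; Resume (tcpip.ResumableEndpoint) only undoes what beforeSave did on a stack that stayed alive. A rough sketch of the shape of that split, using stand-in types (the real interface definitions live in pkg/tcpip and may carry more methods):

package resumedemo

// Stack stands in for *stack.Stack in this sketch.
type Stack struct{}

// RestoredEndpoint mirrors the role the comments above ascribe to
// tcpip.RestoredEndpoint: re-attach to a freshly loaded stack.
type RestoredEndpoint interface {
	Restore(*Stack)
}

// ResumableEndpoint mirrors tcpip.ResumableEndpoint: undo beforeSave on the
// stack the endpoint is already attached to.
type ResumableEndpoint interface {
	Resume()
}

type rawEndpoint struct {
	stack       *Stack
	rcvDisabled bool
}

// Restore runs once after a state load; it gets the new stack explicitly.
func (e *rawEndpoint) Restore(s *Stack) {
	e.stack = s
	e.rcvDisabled = false
}

// Resume only re-enables receive; the stack was never torn down.
func (e *rawEndpoint) Resume() {
	e.rcvDisabled = false
}

// Compile-time checks that one endpoint can satisfy both roles.
var (
	_ RestoredEndpoint  = (*rawEndpoint)(nil)
	_ ResumableEndpoint = (*rawEndpoint)(nil)
)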
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/protocol.go
index 624e2dbe..786f0896 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/protocol.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/protocol.go
@@ -23,6 +23,8 @@ import (
 )
 
 // EndpointFactory implements stack.RawFactory.
+//
+// +stateify savable
 type EndpointFactory struct{}
 
 // NewUnassociatedEndpoint implements stack.RawFactory.NewUnassociatedEndpoint.
@@ -32,11 +34,13 @@ func (EndpointFactory) NewUnassociatedEndpoint(stack *stack.Stack, netProto tcpi
 
 // NewPacketEndpoint implements stack.RawFactory.NewPacketEndpoint.
 func (EndpointFactory) NewPacketEndpoint(stack *stack.Stack, cooked bool, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error) {
-	return packet.NewEndpoint(stack, cooked, netProto, waiterQueue)
+	return packet.NewEndpoint(stack, cooked, netProto, waiterQueue), nil
 }
 
 // CreateOnlyFactory implements stack.RawFactory. It allows creation of raw
 // endpoints that do not support reading, writing, binding, etc.
+//
+// +stateify savable
 type CreateOnlyFactory struct{}
 
 // NewUnassociatedEndpoint implements stack.RawFactory.NewUnassociatedEndpoint.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/raw_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/raw_state_autogen.go
index 1c9bbdbb..0793ad18 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/raw_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/raw_state_autogen.go
@@ -3,6 +3,8 @@
 package raw
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -38,17 +40,17 @@ func (p *rawPacket) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(6, &p.ttlOrHopLimit)
 }
 
-func (p *rawPacket) afterLoad() {}
+func (p *rawPacket) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (p *rawPacket) StateLoad(stateSourceObject state.Source) {
+func (p *rawPacket) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &p.rawPacketEntry)
 	stateSourceObject.Load(1, &p.data)
 	stateSourceObject.Load(3, &p.senderAddr)
 	stateSourceObject.Load(4, &p.packetInfo)
 	stateSourceObject.Load(5, &p.tosOrTClass)
 	stateSourceObject.Load(6, &p.ttlOrHopLimit)
-	stateSourceObject.LoadValue(2, new(int64), func(y any) { p.loadReceivedAt(y.(int64)) })
+	stateSourceObject.LoadValue(2, new(int64), func(y any) { p.loadReceivedAt(ctx, y.(int64)) })
 }
 
 func (e *endpoint) StateTypeName() string {
@@ -92,7 +94,7 @@ func (e *endpoint) StateSave(stateSinkObject state.Sink) {
 }
 
 // +checklocksignore
-func (e *endpoint) StateLoad(stateSourceObject state.Source) {
+func (e *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.DefaultSocketOptionsHandler)
 	stateSourceObject.Load(1, &e.transProto)
 	stateSourceObject.Load(2, &e.waiterQueue)
@@ -106,7 +108,49 @@ func (e *endpoint) StateLoad(stateSourceObject state.Source) {
 	stateSourceObject.Load(10, &e.rcvDisabled)
 	stateSourceObject.Load(11, &e.ipv6ChecksumOffset)
 	stateSourceObject.Load(12, &e.icmpv6Filter)
-	stateSourceObject.AfterLoad(e.afterLoad)
+	stateSourceObject.AfterLoad(func() { e.afterLoad(ctx) })
+}
+
+func (e *EndpointFactory) StateTypeName() string {
+	return "pkg/tcpip/transport/raw.EndpointFactory"
+}
+
+func (e *EndpointFactory) StateFields() []string {
+	return []string{}
+}
+
+func (e *EndpointFactory) beforeSave() {}
+
+// +checklocksignore
+func (e *EndpointFactory) StateSave(stateSinkObject state.Sink) {
+	e.beforeSave()
+}
+
+func (e *EndpointFactory) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (e *EndpointFactory) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+}
+
+func (c *CreateOnlyFactory) StateTypeName() string {
+	return "pkg/tcpip/transport/raw.CreateOnlyFactory"
+}
+
+func (c *CreateOnlyFactory) StateFields() []string {
+	return []string{}
+}
+
+func (c *CreateOnlyFactory) beforeSave() {}
+
+// +checklocksignore
+func (c *CreateOnlyFactory) StateSave(stateSinkObject state.Sink) {
+	c.beforeSave()
+}
+
+func (c *CreateOnlyFactory) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (c *CreateOnlyFactory) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 }
 
 func (l *rawPacketList) StateTypeName() string {
@@ -129,10 +173,10 @@ func (l *rawPacketList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *rawPacketList) afterLoad() {}
+func (l *rawPacketList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *rawPacketList) StateLoad(stateSourceObject state.Source) {
+func (l *rawPacketList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
@@ -157,10 +201,10 @@ func (e *rawPacketEntry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *rawPacketEntry) afterLoad() {}
+func (e *rawPacketEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *rawPacketEntry) StateLoad(stateSourceObject state.Source) {
+func (e *rawPacketEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
@@ -168,6 +212,8 @@ func (e *rawPacketEntry) StateLoad(stateSourceObject state.Source) {
 func init() {
 	state.Register((*rawPacket)(nil))
 	state.Register((*endpoint)(nil))
+	state.Register((*EndpointFactory)(nil))
+	state.Register((*CreateOnlyFactory)(nil))
 	state.Register((*rawPacketList)(nil))
 	state.Register((*rawPacketEntry)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go
index 0006f250..adcfdcfd 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go
@@ -85,7 +85,7 @@ type listenContext struct {
 
 	// listenEP is a reference to the listening endpoint associated with
 	// this context. Can be nil if the context is created by the forwarder.
-	listenEP *endpoint
+	listenEP *Endpoint
 
 	// hasherMu protects hasher.
 	hasherMu sync.Mutex
@@ -107,7 +107,7 @@ func timeStamp(clock tcpip.Clock) uint32 {
 }
 
 // newListenContext creates a new listen context.
-func newListenContext(stk *stack.Stack, protocol *protocol, listenEP *endpoint, rcvWnd seqnum.Size, v6Only bool, netProto tcpip.NetworkProtocolNumber) *listenContext {
+func newListenContext(stk *stack.Stack, protocol *protocol, listenEP *Endpoint, rcvWnd seqnum.Size, v6Only bool, netProto tcpip.NetworkProtocolNumber) *listenContext {
 	l := &listenContext{
 		stack:    stk,
 		protocol: protocol,
@@ -119,7 +119,7 @@ func newListenContext(stk *stack.Stack, protocol *protocol, listenEP *endpoint,
 	}
 
 	for i := range l.nonce {
-		if _, err := io.ReadFull(stk.SecureRNG(), l.nonce[i][:]); err != nil {
+		if _, err := io.ReadFull(stk.SecureRNG().Reader, l.nonce[i][:]); err != nil {
 			panic(err)
 		}
 	}
@@ -183,7 +183,7 @@ func (l *listenContext) isCookieValid(id stack.TransportEndpointID, cookie seqnu
 // the connection parameters given by the arguments. The newly created endpoint
 // will be locked.
 // +checklocksacquire:n.mu
-func (l *listenContext) createConnectingEndpoint(s *segment, rcvdSynOpts header.TCPSynOptions, queue *waiter.Queue) (n *endpoint, _ tcpip.Error) {
+func (l *listenContext) createConnectingEndpoint(s *segment, rcvdSynOpts header.TCPSynOptions, queue *waiter.Queue) (n *Endpoint, _ tcpip.Error) {
 	// Create a new endpoint.
 	netProto := l.netProto
 	if netProto == 0 {
@@ -302,7 +302,7 @@ func (l *listenContext) startHandshake(s *segment, opts header.TCPSynOptions, qu
 // established endpoint is returned.
 //
 // Precondition: if l.listenEP != nil, l.listenEP.mu must be locked.
-func (l *listenContext) performHandshake(s *segment, opts header.TCPSynOptions, queue *waiter.Queue, owner tcpip.PacketOwner) (*endpoint, tcpip.Error) {
+func (l *listenContext) performHandshake(s *segment, opts header.TCPSynOptions, queue *waiter.Queue, owner tcpip.PacketOwner) (*Endpoint, tcpip.Error) {
 	waitEntry, notifyCh := waiter.NewChannelEntry(waiter.WritableEvents)
 	queue.EventRegister(&waitEntry)
 	defer queue.EventUnregister(&waitEntry)
@@ -357,7 +357,7 @@ func (l *listenContext) performHandshake(s *segment, opts header.TCPSynOptions,
 //
 // +checklocks:e.mu
 // +checklocks:n.mu
-func (e *endpoint) propagateInheritableOptionsLocked(n *endpoint) {
+func (e *Endpoint) propagateInheritableOptionsLocked(n *Endpoint) {
 	n.userTimeout = e.userTimeout
 	n.portFlags = e.portFlags
 	n.boundBindToDevice = e.boundBindToDevice
@@ -370,7 +370,7 @@ func (e *endpoint) propagateInheritableOptionsLocked(n *endpoint) {
 // Precondition: e.propagateInheritableOptionsLocked has been called.
 //
 // +checklocks:e.mu
-func (e *endpoint) reserveTupleLocked() bool {
+func (e *Endpoint) reserveTupleLocked() bool {
 	dest := tcpip.FullAddress{
 		Addr: e.TransportEndpointInfo.ID.RemoteAddress,
 		Port: e.TransportEndpointInfo.ID.RemotePort,
@@ -400,11 +400,11 @@ func (e *endpoint) reserveTupleLocked() bool {
 // This is strictly not required normally as a socket that was never accepted
 // can't really have any registered waiters except when stack.Wait() is called
 // which waits for all registered endpoints to stop and expects an EventHUp.
-func (e *endpoint) notifyAborted() {
+func (e *Endpoint) notifyAborted() {
 	e.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.ReadableEvents | waiter.WritableEvents)
 }
 
-func (e *endpoint) acceptQueueIsFull() bool {
+func (e *Endpoint) acceptQueueIsFull() bool {
 	e.acceptMu.Lock()
 	full := e.acceptQueue.isFull()
 	e.acceptMu.Unlock()
@@ -416,11 +416,11 @@ type acceptQueue struct {
 	// NB: this could be an endpointList, but ilist only permits endpoints to
 	// belong to one list at a time, and endpoints are already stored in the
 	// dispatcher's list.
-	endpoints list.List `state:".([]*endpoint)"`
+	endpoints list.List `state:".([]*Endpoint)"`
 
 	// pendingEndpoints is a set of all endpoints for which a handshake is
 	// in progress.
-	pendingEndpoints map[*endpoint]struct{}
+	pendingEndpoints map[*Endpoint]struct{}
 
 	// capacity is the maximum number of endpoints that can be in endpoints.
 	capacity int
@@ -434,7 +434,7 @@ func (a *acceptQueue) isFull() bool {
 // and needs to handle it.
 //
 // +checklocks:e.mu
-func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Error {
+func (e *Endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Error {
 	e.rcvQueueMu.Lock()
 	rcvClosed := e.RcvClosed
 	e.rcvQueueMu.Unlock()
@@ -544,6 +544,39 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Err
 		iss := s.ackNumber - 1
 		irs := s.sequenceNumber - 1
 
+		// As an edge case when SYN-COOKIES are in use and we receive a
+		// segment that has data and is valid we should check if it
+		// already matches a created endpoint and redirect the segment
+		// rather than try and create a new endpoint. This can happen
+		// where the final ACK for the handshake and other data packets
+		// arrive at the same time and are queued to the listening
+		// endpoint before the listening endpoint has had time to
+		// process the first ACK and create the endpoint that matches
+		// the incoming packet's full 5 tuple.
+		netProtos := []tcpip.NetworkProtocolNumber{s.pkt.NetworkProtocolNumber}
+		// If the local address is an IPv4 Address then also look for IPv6
+		// dual stack endpoints.
+		if s.id.LocalAddress.To4() != (tcpip.Address{}) {
+			netProtos = []tcpip.NetworkProtocolNumber{header.IPv4ProtocolNumber, header.IPv6ProtocolNumber}
+		}
+		for _, netProto := range netProtos {
+			if newEP := e.stack.FindTransportEndpoint(netProto, ProtocolNumber, s.id, s.pkt.NICID); newEP != nil && newEP != e {
+				tcpEP := newEP.(*Endpoint)
+				if !tcpEP.EndpointState().connected() {
+					continue
+				}
+				if !tcpEP.enqueueSegment(s) {
+					// Just silently drop the segment as we failed
+					// to queue, we don't want to generate a RST
+					// further below or try and create a new
+					// endpoint etc.
+					return nil
+				}
+				tcpEP.notifyProcessor()
+				return nil
+			}
+		}
+
 		// Since SYN cookies are in use this is potentially an ACK to a
 		// SYN-ACK we sent but don't have a half open connection state
 		// as cookies are being used to protect against a potential SYN
@@ -577,39 +610,6 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Err
 			return replyWithReset(e.stack, s, e.sendTOS, e.ipv4TTL, e.ipv6HopLimit)
 		}
 
-		// As an edge case when SYN-COOKIES are in use and we receive a
-		// segment that has data and is valid we should check if it
-		// already matches a created endpoint and redirect the segment
-		// rather than try and create a new endpoint. This can happen
-		// where the final ACK for the handshake and other data packets
-		// arrive at the same time and are queued to the listening
-		// endpoint before the listening endpoint has had time to
-		// process the first ACK and create the endpoint that matches
-		// the incoming packet's full 5 tuple.
-		netProtos := []tcpip.NetworkProtocolNumber{s.pkt.NetworkProtocolNumber}
-		// If the local address is an IPv4 Address then also look for IPv6
-		// dual stack endpoints.
-		if s.id.LocalAddress.To4() != (tcpip.Address{}) {
-			netProtos = []tcpip.NetworkProtocolNumber{header.IPv4ProtocolNumber, header.IPv6ProtocolNumber}
-		}
-		for _, netProto := range netProtos {
-			if newEP := e.stack.FindTransportEndpoint(netProto, ProtocolNumber, s.id, s.pkt.NICID); newEP != nil && newEP != e {
-				tcpEP := newEP.(*endpoint)
-				if !tcpEP.EndpointState().connected() {
-					continue
-				}
-				if !tcpEP.enqueueSegment(s) {
-					// Just silently drop the segment as we failed
-					// to queue, we don't want to generate a RST
-					// further below or try and create a new
-					// endpoint etc.
-					return nil
-				}
-				tcpEP.notifyProcessor()
-				return nil
-			}
-		}
-
 		// Keep hold of acceptMu until the new endpoint is in the accept queue (or
 		// if there is an error), to guarantee that we will keep our spot in the
 		// queue even if another handshake from the syn queue completes.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go
index 0f900174..7de3fe9e 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go
@@ -15,6 +15,7 @@
 package tcp
 
 import (
+	"crypto/sha256"
 	"encoding/binary"
 	"fmt"
 	"math"
@@ -23,7 +24,6 @@ import (
 	"gvisor.dev/gvisor/pkg/sync"
 	"gvisor.dev/gvisor/pkg/tcpip"
 	"gvisor.dev/gvisor/pkg/tcpip/checksum"
-	"gvisor.dev/gvisor/pkg/tcpip/hash/jenkins"
 	"gvisor.dev/gvisor/pkg/tcpip/header"
 	"gvisor.dev/gvisor/pkg/tcpip/seqnum"
 	"gvisor.dev/gvisor/pkg/tcpip/stack"
@@ -63,8 +63,8 @@ const (
 //
 // +stateify savable
 type handshake struct {
-	ep       *endpoint
-	listenEP *endpoint
+	ep       *Endpoint
+	listenEP *Endpoint
 	state    handshakeState
 	active   bool
 	flags    header.TCPFlags
@@ -97,7 +97,7 @@ type handshake struct {
 	// hit.
 	deferAccept time.Duration
 
-	// acked is true if the the final ACK for a 3-way handshake has
+	// acked is true if the final ACK for a 3-way handshake has
 	// been received. This is required to stop retransmitting the
 	// original SYN-ACK when deferAccept is enabled.
 	acked bool
@@ -115,15 +115,14 @@ type handshake struct {
 	retransmitTimer *backoffTimer `state:"nosave"`
 }
 
-// maybeFailTimerHandler takes a handler function for a timer that may fail and
-// returns a function that will invoke the provided handler with the endpoint
-// mutex held. In addition the returned function will perform any cleanup that
-// maybe required if the timer handler returns an error and in case of no errors
-// will notify the processor if there are pending segments that need to be
-// processed.
-
+// timerHandler takes a handler function for a timer and returns a function that
+// will invoke the provided handler with the endpoint mutex held. In addition
+// the returned function will perform any cleanup that may be required if the
+// timer handler returns an error. In the case of no errors it will notify the
+// processor if there are pending segments that need to be processed.
+//
 // NOTE: e.mu is held for the duration of the call to f().
-func maybeFailTimerHandler(e *endpoint, f func() tcpip.Error) func() {
+func timerHandler(e *Endpoint, f func() tcpip.Error) func() {
 	return func() {
 		e.mu.Lock()
 		if err := f(); err != nil {
@@ -154,30 +153,9 @@ func maybeFailTimerHandler(e *endpoint, f func() tcpip.Error) func() {
 	}
 }
 
-// timerHandler takes a handler function for a timer that never results in a
-// connection being aborted and returns a function that will invoke the provided
-// handler with the endpoint mutex held. In addition the returned function will
-// notify the processor if there are pending segments that need to be processed
-// once the handler function completes.
-//
-// NOTE: e.mu is held for the duration of the call to f()
-func timerHandler(e *endpoint, f func()) func() {
-	return func() {
-		e.mu.Lock()
-		f()
-		processor := e.protocol.dispatcher.selectProcessor(e.ID)
-		e.mu.Unlock()
-		// notify processor if there are pending segments to be
-		// processed.
-		if !e.segmentQueue.empty() {
-			processor.queueEndpoint(e)
-		}
-	}
-}
-
 // +checklocks:e.mu
 // +checklocksacquire:h.ep.mu
-func (e *endpoint) newHandshake() (h *handshake) {
+func (e *Endpoint) newHandshake() (h *handshake) {
 	h = &handshake{
 		ep:          e,
 		active:      true,
@@ -190,7 +168,7 @@ func (e *endpoint) newHandshake() (h *handshake) {
 	e.h = h
 	// By the time handshake is created, e.ID is already initialized.
 	e.TSOffset = e.protocol.tsOffset(e.ID.LocalAddress, e.ID.RemoteAddress)
-	timer, err := newBackoffTimer(h.ep.stack.Clock(), InitialRTO, MaxRTO, maybeFailTimerHandler(e, h.retransmitHandlerLocked))
+	timer, err := newBackoffTimer(h.ep.stack.Clock(), InitialRTO, MaxRTO, timerHandler(e, h.retransmitHandlerLocked))
 	if err != nil {
 		panic(fmt.Sprintf("newBackOffTimer(_, %s, %s, _) failed: %s", InitialRTO, MaxRTO, err))
 	}
@@ -200,7 +178,7 @@ func (e *endpoint) newHandshake() (h *handshake) {
 
 // +checklocks:e.mu
 // +checklocksacquire:h.ep.mu
-func (e *endpoint) newPassiveHandshake(isn, irs seqnum.Value, opts header.TCPSynOptions, deferAccept time.Duration) (h *handshake) {
+func (e *Endpoint) newPassiveHandshake(isn, irs seqnum.Value, opts header.TCPSynOptions, deferAccept time.Duration) (h *handshake) {
 	h = e.newHandshake()
 	h.resetToSynRcvd(isn, irs, opts, deferAccept)
 	return h
@@ -235,11 +213,13 @@ func (h *handshake) resetState() {
 
 // generateSecureISN generates a secure Initial Sequence number based on the
 // recommendation here https://tools.ietf.org/html/rfc6528#page-3.
-func generateSecureISN(id stack.TransportEndpointID, clock tcpip.Clock, seed uint32) seqnum.Value {
-	isnHasher := jenkins.Sum32(seed)
+func generateSecureISN(id stack.TransportEndpointID, clock tcpip.Clock, seed [16]byte) seqnum.Value {
+	isnHasher := sha256.New()
+
 	// Per hash.Hash.Writer:
 	//
 	// It never returns an error.
+	_, _ = isnHasher.Write(seed[:])
 	_, _ = isnHasher.Write(id.LocalAddress.AsSlice())
 	_, _ = isnHasher.Write(id.RemoteAddress.AsSlice())
 	portBuf := make([]byte, 2)
@@ -257,7 +237,8 @@ func generateSecureISN(id stack.TransportEndpointID, clock tcpip.Clock, seed uin
 	//
 	// Which sort of guarantees that we won't reuse the ISN for a new
 	// connection for the same tuple for at least 274s.
-	isn := isnHasher.Sum32() + uint32(clock.NowMonotonic().Sub(tcpip.MonotonicTime{}).Nanoseconds()>>6)
+	hash := binary.LittleEndian.Uint32(isnHasher.Sum(nil)[:4])
+	isn := hash + uint32(clock.NowMonotonic().Sub(tcpip.MonotonicTime{}).Nanoseconds()>>6)
 	return seqnum.Value(isn)
 }
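Reviewer note: generateSecureISN now follows the RFC 6528 recommendation with SHA-256 over a 16-byte secret seed and the connection tuple instead of a Jenkins hash, truncates the digest to 32 bits, and adds a clock term that advances roughly every 64 ns so the ISN keeps moving for repeated connections on the same tuple. A standalone worked example of the same construction (standard library only; the address/port field order and endianness beyond what the hunk shows are assumptions, and the surrounding types are stand-ins):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"time"
)

// secureISN mirrors the construction above: SHA-256 over the secret seed and
// the 4-tuple, truncated to 32 bits, plus a time component shifted by 6 so it
// ticks about every 64ns. hash.Hash.Write never returns an error.
func secureISN(seed [16]byte, localAddr, remoteAddr []byte, localPort, remotePort uint16, now time.Duration) uint32 {
	h := sha256.New()
	h.Write(seed[:])
	h.Write(localAddr)
	h.Write(remoteAddr)
	portBuf := make([]byte, 2)
	binary.LittleEndian.PutUint16(portBuf, localPort)
	h.Write(portBuf)
	binary.LittleEndian.PutUint16(portBuf, remotePort)
	h.Write(portBuf)
	hash := binary.LittleEndian.Uint32(h.Sum(nil)[:4])
	return hash + uint32(now.Nanoseconds()>>6)
}

func main() {
	var seed [16]byte // in the stack this comes from the secure RNG
	isn := secureISN(seed, []byte{10, 0, 0, 1}, []byte{10, 0, 0, 2}, 49152, 443, 5*time.Second)
	fmt.Printf("isn=%d\n", isn)
}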
 
@@ -287,19 +268,9 @@ func (h *handshake) resetToSynRcvd(iss seqnum.Value, irs seqnum.Value, opts head
 }
 
 // checkAck checks if the ACK number, if present, of a segment received during
-// a TCP 3-way handshake is valid. If it's not, a RST segment is sent back in
-// response.
+// a TCP 3-way handshake is valid.
 func (h *handshake) checkAck(s *segment) bool {
-	if s.flags.Contains(header.TCPFlagAck) && s.ackNumber != h.iss+1 {
-		// RFC 793, page 72 (https://datatracker.ietf.org/doc/html/rfc793#page-72):
-		//   If the segment acknowledgment is not acceptable, form a reset segment,
-		//        <SEQ=SEG.ACK><CTL=RST>
-		//   and send it.
-		h.ep.sendEmptyRaw(header.TCPFlagRst, s.ackNumber, 0, 0)
-		return false
-	}
-
-	return true
+	return !(s.flags.Contains(header.TCPFlagAck) && s.ackNumber != h.iss+1)
 }
 
 // synSentState handles a segment received when the TCP 3-way handshake is in
@@ -321,6 +292,11 @@ func (h *handshake) synSentState(s *segment) tcpip.Error {
 	}
 
 	if !h.checkAck(s) {
+		// RFC 793, page 72 (https://datatracker.ietf.org/doc/html/rfc793#page-72):
+		//   If the segment acknowledgment is not acceptable, form a reset segment,
+		//        <SEQ=SEG.ACK><CTL=RST>
+		//   and send it.
+		h.ep.sendEmptyRaw(header.TCPFlagRst, s.ackNumber, 0, 0)
 		return nil
 	}
 
@@ -402,8 +378,30 @@ func (h *handshake) synRcvdState(s *segment) tcpip.Error {
 		return nil
 	}
 
-	if !h.checkAck(s) {
-		return nil
+	// It's possible that s is an ACK of a SYN cookie. This can happen if:
+	//
+	//   - We receive a SYN while under load and issue a SYN/ACK with
+	//     cookie S.
+	//   - We receive a retransmitted SYN while space exists in the SYN
+	//     queue, and issue a SYN/ACK with seqnum S'.
+	//   - We receive the ACK based on S.
+	//
+	// If we receive a SYN cookie ACK, just use the cookie seqnum.
+	if !h.checkAck(s) && h.listenEP != nil {
+		iss := s.ackNumber - 1
+		data, ok := h.listenEP.listenCtx.isCookieValid(s.id, iss, s.sequenceNumber-1)
+		if !ok || int(data) >= len(mssTable) {
+			// This isn't a valid cookie.
+			// RFC 793, page 72 (https://datatracker.ietf.org/doc/html/rfc793#page-72):
+			//   If the segment acknowledgment is not acceptable, form a reset segment,
+			//        <SEQ=SEG.ACK><CTL=RST>
+			//   and send it.
+			h.ep.sendEmptyRaw(header.TCPFlagRst, s.ackNumber, 0, 0)
+			return nil
+		}
+		// This is a cookie that snuck its way in after we stopped using them.
+		h.mss = mssTable[data]
+		h.iss = iss
 	}
 
 	// RFC 793, Section 3.9, page 69, states that in the SYN-RCVD state, a
@@ -796,9 +794,10 @@ type tcpFields struct {
 	rcvWnd seqnum.Size
 	opts   []byte
 	txHash uint32
+	df     bool
 }
 
-func (e *endpoint) sendSynTCP(r *stack.Route, tf tcpFields, opts header.TCPSynOptions) tcpip.Error {
+func (e *Endpoint) sendSynTCP(r *stack.Route, tf tcpFields, opts header.TCPSynOptions) tcpip.Error {
 	tf.opts = makeSynOptions(opts)
 	// We ignore SYN send errors and let the callers re-attempt send.
 	p := stack.NewPacketBuffer(stack.PacketBufferOptions{ReserveHeaderBytes: header.TCPMinimumSize + int(r.MaxHeaderLength()) + len(tf.opts)})
@@ -811,7 +810,7 @@ func (e *endpoint) sendSynTCP(r *stack.Route, tf tcpFields, opts header.TCPSynOp
 }
 
 // This method takes ownership of pkt.
-func (e *endpoint) sendTCP(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack.GSO) tcpip.Error {
+func (e *Endpoint) sendTCP(r *stack.Route, tf tcpFields, pkt *stack.PacketBuffer, gso stack.GSO) tcpip.Error {
 	tf.txHash = e.txHash
 	if err := sendTCP(r, tf, pkt, gso, e.owner); err != nil {
 		e.stats.SendErrors.SegmentSendToNetworkFailed.Increment()
@@ -821,7 +820,7 @@ func (e *endpoint) sendTCP(r *stack.Route, tf tcpFields, pkt stack.PacketBufferP
 	return nil
 }
 
-func buildTCPHdr(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack.GSO) {
+func buildTCPHdr(r *stack.Route, tf tcpFields, pkt *stack.PacketBuffer, gso stack.GSO) {
 	optLen := len(tf.opts)
 	tcp := header.TCP(pkt.TransportHeader().Push(header.TCPMinimumSize + optLen))
 	pkt.TransportProtocolNumber = header.TCPProtocolNumber
@@ -850,7 +849,7 @@ func buildTCPHdr(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso st
 	}
 }
 
-func sendTCPBatch(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack.GSO, owner tcpip.PacketOwner) tcpip.Error {
+func sendTCPBatch(r *stack.Route, tf tcpFields, pkt *stack.PacketBuffer, gso stack.GSO, owner tcpip.PacketOwner) tcpip.Error {
 	optLen := len(tf.opts)
 	if tf.rcvWnd > math.MaxUint16 {
 		tf.rcvWnd = math.MaxUint16
@@ -883,7 +882,7 @@ func sendTCPBatch(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso s
 		buildTCPHdr(r, tf, pkt, gso)
 		tf.seq = tf.seq.Add(seqnum.Size(packetSize))
 		pkt.GSOOptions = gso
-		if err := r.WritePacket(stack.NetworkHeaderParams{Protocol: ProtocolNumber, TTL: tf.ttl, TOS: tf.tos}, pkt); err != nil {
+		if err := r.WritePacket(stack.NetworkHeaderParams{Protocol: ProtocolNumber, TTL: tf.ttl, TOS: tf.tos, DF: tf.df}, pkt); err != nil {
 			r.Stats().TCP.SegmentSendErrors.Increment()
 			if shouldSplitPacket {
 				pkt.DecRef()
@@ -901,7 +900,7 @@ func sendTCPBatch(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso s
 // sendTCP sends a TCP segment with the provided options via the provided
 // network endpoint and under the provided identity. This method takes
 // ownership of pkt.
-func sendTCP(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack.GSO, owner tcpip.PacketOwner) tcpip.Error {
+func sendTCP(r *stack.Route, tf tcpFields, pkt *stack.PacketBuffer, gso stack.GSO, owner tcpip.PacketOwner) tcpip.Error {
 	if tf.rcvWnd > math.MaxUint16 {
 		tf.rcvWnd = math.MaxUint16
 	}
@@ -915,7 +914,7 @@ func sendTCP(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack.
 	pkt.Owner = owner
 	buildTCPHdr(r, tf, pkt, gso)
 
-	if err := r.WritePacket(stack.NetworkHeaderParams{Protocol: ProtocolNumber, TTL: tf.ttl, TOS: tf.tos}, pkt); err != nil {
+	if err := r.WritePacket(stack.NetworkHeaderParams{Protocol: ProtocolNumber, TTL: tf.ttl, TOS: tf.tos, DF: tf.df}, pkt); err != nil {
 		r.Stats().TCP.SegmentSendErrors.Increment()
 		return err
 	}
@@ -927,7 +926,7 @@ func sendTCP(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack.
 }
 
 // makeOptions makes an options slice.
-func (e *endpoint) makeOptions(sackBlocks []header.SACKBlock) []byte {
+func (e *Endpoint) makeOptions(sackBlocks []header.SACKBlock) []byte {
 	options := getOptions()
 	offset := 0
 
@@ -966,7 +965,10 @@ func (e *endpoint) makeOptions(sackBlocks []header.SACKBlock) []byte {
 }
 
 // sendEmptyRaw sends a TCP segment with no payload to the endpoint's peer.
-func (e *endpoint) sendEmptyRaw(flags header.TCPFlags, seq, ack seqnum.Value, rcvWnd seqnum.Size) tcpip.Error {
+//
+// +checklocks:e.mu
+// +checklocksalias:e.snd.ep.mu=e.mu
+func (e *Endpoint) sendEmptyRaw(flags header.TCPFlags, seq, ack seqnum.Value, rcvWnd seqnum.Size) tcpip.Error {
 	pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{})
 	defer pkt.DecRef()
 	return e.sendRaw(pkt, flags, seq, ack, rcvWnd)
@@ -974,7 +976,10 @@ func (e *endpoint) sendEmptyRaw(flags header.TCPFlags, seq, ack seqnum.Value, rc
 
 // sendRaw sends a TCP segment to the endpoint's peer. This method takes
 // ownership of pkt. pkt must not have any headers set.
-func (e *endpoint) sendRaw(pkt stack.PacketBufferPtr, flags header.TCPFlags, seq, ack seqnum.Value, rcvWnd seqnum.Size) tcpip.Error {
+//
+// +checklocks:e.mu
+// +checklocksalias:e.snd.ep.mu=e.mu
+func (e *Endpoint) sendRaw(pkt *stack.PacketBuffer, flags header.TCPFlags, seq, ack seqnum.Value, rcvWnd seqnum.Size) tcpip.Error {
 	var sackBlocks []header.SACKBlock
 	if e.EndpointState() == StateEstablished && e.rcv.pendingRcvdSegments.Len() > 0 && (flags&header.TCPFlagAck != 0) {
 		sackBlocks = e.sack.Blocks[:e.sack.NumBlocks]
@@ -991,12 +996,13 @@ func (e *endpoint) sendRaw(pkt stack.PacketBufferPtr, flags header.TCPFlags, seq
 		ack:    ack,
 		rcvWnd: rcvWnd,
 		opts:   options,
+		df:     e.pmtud == tcpip.PMTUDiscoveryWant || e.pmtud == tcpip.PMTUDiscoveryDo,
 	}, pkt, e.gso)
 }
 
 // +checklocks:e.mu
 // +checklocksalias:e.snd.ep.mu=e.mu
-func (e *endpoint) sendData(next *segment) {
+func (e *Endpoint) sendData(next *segment) {
 	// Initialize the next segment to write if it's currently nil.
 	if e.snd.writeNext == nil {
 		if next == nil {
@@ -1014,7 +1020,7 @@ func (e *endpoint) sendData(next *segment) {
 // indicating that the connection is being reset due to receiving a RST. This
 // method must only be called from the protocol goroutine.
 // +checklocks:e.mu
-func (e *endpoint) resetConnectionLocked(err tcpip.Error) {
+func (e *Endpoint) resetConnectionLocked(err tcpip.Error) {
 	// Only send a reset if the connection is being aborted for a reason
 	// other than receiving a reset.
 	e.hardError = err
@@ -1049,7 +1055,7 @@ func (e *endpoint) resetConnectionLocked(err tcpip.Error) {
 // delivered to this endpoint from the demuxer when the endpoint
 // is transitioned to StateClose.
 // +checklocks:e.mu
-func (e *endpoint) transitionToStateCloseLocked() {
+func (e *Endpoint) transitionToStateCloseLocked() {
 	s := e.EndpointState()
 	if s == StateClose {
 		return
@@ -1068,7 +1074,7 @@ func (e *endpoint) transitionToStateCloseLocked() {
 // segment to any other endpoint other than the current one. This is called
 // only when the endpoint is in StateClose and we want to deliver the segment
 // to any other listening endpoint. We reply with RST if we cannot find one.
-func (e *endpoint) tryDeliverSegmentFromClosedEndpoint(s *segment) {
+func (e *Endpoint) tryDeliverSegmentFromClosedEndpoint(s *segment) {
 	ep := e.stack.FindTransportEndpoint(e.NetProto, e.TransProto, e.TransportEndpointInfo.ID, s.pkt.NICID)
 	if ep == nil && e.NetProto == header.IPv6ProtocolNumber && e.TransportEndpointInfo.ID.LocalAddress.To4() != (tcpip.Address{}) {
 		// Dual-stack socket, try IPv4.
@@ -1087,10 +1093,10 @@ func (e *endpoint) tryDeliverSegmentFromClosedEndpoint(s *segment) {
 	}
 
 	if e == ep {
-		panic(fmt.Sprintf("current endpoint not removed from demuxer, enqueing segments to itself, endpoint in state %v", e.EndpointState()))
+		panic(fmt.Sprintf("current endpoint not removed from demuxer, enqueuing segments to itself, endpoint in state %v", e.EndpointState()))
 	}
 
-	if ep := ep.(*endpoint); ep.enqueueSegment(s) {
+	if ep := ep.(*Endpoint); ep.enqueueSegment(s) {
 		ep.notifyProcessor()
 	}
 }
@@ -1098,7 +1104,7 @@ func (e *endpoint) tryDeliverSegmentFromClosedEndpoint(s *segment) {
 // Drain segment queue from the endpoint and try to re-match the segment to a
 // different endpoint. This is used when the current endpoint is transitioned to
 // StateClose and has been unregistered from the transport demuxer.
-func (e *endpoint) drainClosingSegmentQueue() {
+func (e *Endpoint) drainClosingSegmentQueue() {
 	for {
 		s := e.segmentQueue.dequeue()
 		if s == nil {
@@ -1111,7 +1117,7 @@ func (e *endpoint) drainClosingSegmentQueue() {
 }
 
 // +checklocks:e.mu
-func (e *endpoint) handleReset(s *segment) (ok bool, err tcpip.Error) {
+func (e *Endpoint) handleReset(s *segment) (ok bool, err tcpip.Error) {
 	if e.rcv.acceptable(s.sequenceNumber, 0) {
 		// RFC 793, page 37 states that "in all states
 		// except SYN-SENT, all reset (RST) segments are
@@ -1160,7 +1166,7 @@ func (e *endpoint) handleReset(s *segment) (ok bool, err tcpip.Error) {
 //
 // +checklocks:e.mu
 // +checklocksalias:e.snd.ep.mu=e.mu
-func (e *endpoint) handleSegmentsLocked() tcpip.Error {
+func (e *Endpoint) handleSegmentsLocked() tcpip.Error {
 	sndUna := e.snd.SndUna
 	for i := 0; i < maxSegmentsPerWake; i++ {
 		if state := e.EndpointState(); state.closed() || state == StateTimeWait || state == StateError {
@@ -1202,7 +1208,7 @@ func (e *endpoint) handleSegmentsLocked() tcpip.Error {
 }
 
 // +checklocks:e.mu
-func (e *endpoint) probeSegmentLocked() {
+func (e *Endpoint) probeSegmentLocked() {
 	if fn := e.probe; fn != nil {
 		var state stack.TCPEndpointState
 		e.completeStateLocked(&state)
@@ -1216,7 +1222,7 @@ func (e *endpoint) probeSegmentLocked() {
 // +checklocks:e.mu
 // +checklocksalias:e.rcv.ep.mu=e.mu
 // +checklocksalias:e.snd.ep.mu=e.mu
-func (e *endpoint) handleSegmentLocked(s *segment) (cont bool, err tcpip.Error) {
+func (e *Endpoint) handleSegmentLocked(s *segment) (cont bool, err tcpip.Error) {
 	// Invoke the tcp probe if installed. The tcp probe function will update
 	// the TCPEndpointState after the segment is processed.
 	defer e.probeSegmentLocked()
@@ -1291,11 +1297,16 @@ func (e *endpoint) handleSegmentLocked(s *segment) (cont bool, err tcpip.Error)
 // from the other side after a number of tries, we terminate the connection.
 // +checklocks:e.mu
 // +checklocksalias:e.snd.ep.mu=e.mu
-func (e *endpoint) keepaliveTimerExpired() tcpip.Error {
+func (e *Endpoint) keepaliveTimerExpired() tcpip.Error {
 	userTimeout := e.userTimeout
 
+	// If the route is not ready or already cleaned up, then we don't need to
+	// send keepalives.
+	if e.route == nil {
+		return nil
+	}
 	e.keepalive.Lock()
-	if !e.SocketOptions().GetKeepAlive() || e.keepalive.timer.isZero() || !e.keepalive.timer.checkExpiration() {
+	if !e.SocketOptions().GetKeepAlive() || e.keepalive.timer.isUninitialized() || !e.keepalive.timer.checkExpiration() {
 		e.keepalive.Unlock()
 		return nil
 	}
@@ -1325,10 +1336,10 @@ func (e *endpoint) keepaliveTimerExpired() tcpip.Error {
 
 // resetKeepaliveTimer restarts or stops the keepalive timer, depending on
 // whether it is enabled for this endpoint.
-func (e *endpoint) resetKeepaliveTimer(receivedData bool) {
+func (e *Endpoint) resetKeepaliveTimer(receivedData bool) {
 	e.keepalive.Lock()
 	defer e.keepalive.Unlock()
-	if e.keepalive.timer.isZero() {
+	if e.keepalive.timer.isUninitialized() {
 		if state := e.EndpointState(); !state.closed() {
 			panic(fmt.Sprintf("Unexpected state when the keepalive time is cleaned up, got %s, want %s or %s", state, StateClose, StateError))
 		}
@@ -1351,7 +1362,7 @@ func (e *endpoint) resetKeepaliveTimer(receivedData bool) {
 }
 
 // disableKeepaliveTimer stops the keepalive timer.
-func (e *endpoint) disableKeepaliveTimer() {
+func (e *Endpoint) disableKeepaliveTimer() {
 	e.keepalive.Lock()
 	e.keepalive.timer.disable()
 	e.keepalive.Unlock()
@@ -1359,7 +1370,7 @@ func (e *endpoint) disableKeepaliveTimer() {
 
 // finWait2TimerExpired is called when the FIN-WAIT-2 timeout is hit
 // and the peer hasn't sent us a FIN.
-func (e *endpoint) finWait2TimerExpired() {
+func (e *Endpoint) finWait2TimerExpired() {
 	e.mu.Lock()
 	e.transitionToStateCloseLocked()
 	e.mu.Unlock()
@@ -1368,7 +1379,7 @@ func (e *endpoint) finWait2TimerExpired() {
 }
 
 // +checklocks:e.mu
-func (e *endpoint) handshakeFailed(err tcpip.Error) {
+func (e *Endpoint) handshakeFailed(err tcpip.Error) {
 	e.lastErrorMu.Lock()
 	e.lastError = err
 	e.lastErrorMu.Unlock()
@@ -1388,7 +1399,7 @@ func (e *endpoint) handshakeFailed(err tcpip.Error) {
 // state.
 // +checklocks:e.mu
 // +checklocksalias:e.rcv.ep.mu=e.mu
-func (e *endpoint) handleTimeWaitSegments() (extendTimeWait bool, reuseTW func()) {
+func (e *Endpoint) handleTimeWaitSegments() (extendTimeWait bool, reuseTW func()) {
 	for i := 0; i < maxSegmentsPerWake; i++ {
 		s := e.segmentQueue.dequeue()
 		if s == nil {
@@ -1409,7 +1420,7 @@ func (e *endpoint) handleTimeWaitSegments() (extendTimeWait bool, reuseTW func()
 			}
 			for _, netProto := range netProtos {
 				if listenEP := e.stack.FindTransportEndpoint(netProto, info.TransProto, newID, s.pkt.NICID); listenEP != nil {
-					tcpEP := listenEP.(*endpoint)
+					tcpEP := listenEP.(*Endpoint)
 					if EndpointState(tcpEP.State()) == StateListen {
 						reuseTW = func() {
 							if !tcpEP.enqueueSegment(s) {
@@ -1434,7 +1445,7 @@ func (e *endpoint) handleTimeWaitSegments() (extendTimeWait bool, reuseTW func()
 }
 
 // +checklocks:e.mu
-func (e *endpoint) getTimeWaitDuration() time.Duration {
+func (e *Endpoint) getTimeWaitDuration() time.Duration {
 	timeWaitDuration := DefaultTCPTimeWaitTimeout
 
 	// Get the stack wide configuration.
@@ -1448,7 +1459,7 @@ func (e *endpoint) getTimeWaitDuration() time.Duration {
 // timeWaitTimerExpired is called when an endpoint completes the required time
 // (typically 2 * MSL unless configured to something else at a stack level) in
 // TIME-WAIT state.
-func (e *endpoint) timeWaitTimerExpired() {
+func (e *Endpoint) timeWaitTimerExpired() {
 	e.mu.Lock()
 	if e.EndpointState() != StateTimeWait {
 		e.mu.Unlock()
@@ -1461,7 +1472,7 @@ func (e *endpoint) timeWaitTimerExpired() {
 }
 
 // notifyProcessor queues this endpoint for processing to its TCP processor.
-func (e *endpoint) notifyProcessor() {
+func (e *Endpoint) notifyProcessor() {
 	// We use TryLock here to avoid deadlocks in cases where a listening endpoint that is being
 	// closed tries to abort half completed connections which in turn try to queue any segments
 	// queued to that endpoint back to the same listening endpoint (because it may have got
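
The hunk above now derives the IP Don't-Fragment bit from the endpoint's PMTUD setting (`df: e.pmtud == tcpip.PMTUDiscoveryWant || e.pmtud == tcpip.PMTUDiscoveryDo`), and a later hunk in endpoint.go teaches SetSockOptInt to accept Want/Dont/Do while rejecting Probe. The sketch below restates that mapping in a standalone, illustrative form; the pmtudStrategy type and constant names are stand-ins for gvisor's tcpip.PMTUDStrategy values, not the real package.

```go
// Illustrative only: a self-contained mirror of the PMTUD handling added in
// this patch. The pmtudStrategy type and names stand in for gvisor's
// tcpip.PMTUDStrategy values; they are not the real API.
package main

import (
	"errors"
	"fmt"
)

type pmtudStrategy int

const (
	pmtuDiscoveryWant pmtudStrategy = iota // use per-route hints, set DF
	pmtuDiscoveryDont                      // never set DF
	pmtuDiscoveryDo                        // always set DF
	pmtuDiscoveryProbe                     // probing mode, rejected by the patch
)

var errNotSupported = errors.New("not supported")

// setDF mirrors the expression used when building outgoing segments:
// the DF bit is set for Want and Do, cleared for Dont.
func setDF(s pmtudStrategy) bool {
	return s == pmtuDiscoveryWant || s == pmtuDiscoveryDo
}

// validateStrategy mirrors the SetSockOptInt switch: Want/Dont/Do are
// accepted, Probe (and anything else) yields a "not supported" error.
func validateStrategy(s pmtudStrategy) error {
	switch s {
	case pmtuDiscoveryWant, pmtuDiscoveryDont, pmtuDiscoveryDo:
		return nil
	default:
		return errNotSupported
	}
}

func main() {
	for _, s := range []pmtudStrategy{pmtuDiscoveryWant, pmtuDiscoveryDont, pmtuDiscoveryDo, pmtuDiscoveryProbe} {
		fmt.Printf("strategy=%d accepted=%v df=%v\n", s, validateStrategy(s) == nil, setDF(s))
	}
}
```

In the patched endpoint the chosen strategy is stored under e.mu and read back through GetSockOptInt(tcpip.MTUDiscoverOption), as the endpoint.go hunks further down show.
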
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/cubic.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/cubic.go
index 6985194b..0b4e9c0c 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/cubic.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/cubic.go
@@ -18,9 +18,40 @@ import (
 	"math"
 	"time"
 
+	"gvisor.dev/gvisor/pkg/tcpip"
 	"gvisor.dev/gvisor/pkg/tcpip/stack"
 )
 
+// effectivelyInfinity is an initialization value used for round-trip times
+// that are then set using min.  It is equal to approximately 292 years: large
+// enough that it will always be greater than a real TCP round-trip time, and
+// small enough that it fits in time.Duration.
+const effectivelyInfinity = time.Duration(math.MaxInt64)
+
+const (
+	// RTT = round-trip time.
+
+	// The delay increase sensitivity is determined by minRTTThresh and
+	// maxRTTThresh. Smaller values of minRTTThresh may cause spurious exits
+	// from slow start. Larger values of maxRTTThresh may result in slow start
+	// not exiting until loss is encountered for connections on large RTT paths.
+	minRTTThresh = 4 * time.Millisecond
+	maxRTTThresh = 16 * time.Millisecond
+
+	// minRTTDivisor is a fraction of RTT to compute the delay threshold. A
+	// smaller value would mean a larger threshold and thus less sensitivity to
+	// delay increase, and vice versa.
+	minRTTDivisor = 8
+
+	// nRTTSample is the minimum number of RTT samples in the round before
+	// considering whether to exit the round due to increased RTT.
+	nRTTSample = 8
+
+	// ackDelta is the maximum time between ACKs for them to be considered part
+	// of the same ACK Train during HyStart
+	// of the same ACK train during HyStart.
+)
+
 // cubicState stores the variables related to TCP CUBIC congestion
 // control algorithm state.
 //
@@ -39,11 +70,19 @@ type cubicState struct {
 // newCubicCC returns a partially initialized cubic state with the constants
 // beta and c set and t set to current time.
 func newCubicCC(s *sender) *cubicState {
+	now := s.ep.stack.Clock().NowMonotonic()
 	return &cubicState{
 		TCPCubicState: stack.TCPCubicState{
-			T:    s.ep.stack.Clock().NowMonotonic(),
+			T:    now,
 			Beta: 0.7,
 			C:    0.4,
+			// By this point, the sender has initialized its initial sequence
+			// number.
+			EndSeq:     s.SndNxt,
+			LastRTT:    effectivelyInfinity,
+			CurrRTT:    effectivelyInfinity,
+			LastAck:    now,
+			RoundStart: now,
 		},
 		s: s,
 	}
@@ -66,6 +105,62 @@ func (c *cubicState) enterCongestionAvoidance() {
 	}
 }
 
+// updateHyStart tracks packet round-trip time (rtt) to find a safe threshold
+// to exit slow start without triggering packet loss.  It updates the SSThresh
+// when it does.
+//
+// Implementation of HyStart follows the algorithm from the Linux kernel, rather
+// than RFC 9406 (https://www.rfc-editor.org/rfc/rfc9406.html). Briefly, the
+// Linux kernel algorithm is based directly on the original HyStart paper
+// (https://doi.org/10.1016/j.comnet.2011.01.014), and differs from the RFC in
+// that two detection algorithms run in parallel ('ACK train' and 'Delay
+// increase').  The RFC version includes only the latter algorithm and adds an
+// intermediate phase called Conservative Slow Start, which is not implemented
+// here.
+func (c *cubicState) updateHyStart(rtt time.Duration) {
+	if rtt < 0 {
+		// negative indicates unknown
+		return
+	}
+	now := c.s.ep.stack.Clock().NowMonotonic()
+	if c.EndSeq.LessThan(c.s.SndUna) {
+		c.beginHyStartRound(now)
+	}
+	// ACK train
+	if now.Sub(c.LastAck) < ackDelta && // ensures acks are part of the same "train"
+		c.LastRTT < effectivelyInfinity {
+		c.LastAck = now
+		if thresh := c.LastRTT / 2; now.Sub(c.RoundStart) > thresh {
+			c.s.Ssthresh = c.s.SndCwnd
+		}
+	}
+
+	// Delay increase
+	c.CurrRTT = min(c.CurrRTT, rtt)
+	c.SampleCount++
+
+	if c.SampleCount >= nRTTSample && c.LastRTT < effectivelyInfinity {
+		// i.e. LastRTT/minRTTDivisor, but clamped to minRTTThresh & maxRTTThresh
+		thresh := max(
+			minRTTThresh,
+			min(maxRTTThresh, c.LastRTT/minRTTDivisor),
+		)
+		if c.CurrRTT >= (c.LastRTT + thresh) {
+			// Triggered HyStart safe exit threshold
+			c.s.Ssthresh = c.s.SndCwnd
+		}
+	}
+}
+
+func (c *cubicState) beginHyStartRound(now tcpip.MonotonicTime) {
+	c.EndSeq = c.s.SndNxt
+	c.SampleCount = 0
+	c.LastRTT = c.CurrRTT
+	c.CurrRTT = effectivelyInfinity
+	c.LastAck = now
+	c.RoundStart = now
+}
+
 // updateSlowStart will update the congestion window as per the slow-start
 // algorithm used by NewReno. If after adjusting the congestion window we cross
 // the ssThresh then it will return the number of packets that must be consumed
@@ -92,7 +187,10 @@ func (c *cubicState) updateSlowStart(packetsAcked int) int {
 // Update updates cubic's internal state variables. It must be called on every
 // ACK received.
 // Refer: https://tools.ietf.org/html/rfc8312#section-4
-func (c *cubicState) Update(packetsAcked int) {
+func (c *cubicState) Update(packetsAcked int, rtt time.Duration) {
+	if c.s.Ssthresh == InitialSsthresh && c.s.SndCwnd < c.s.Ssthresh {
+		c.updateHyStart(rtt)
+	}
 	if c.s.SndCwnd < c.s.Ssthresh {
 		packetsAcked = c.updateSlowStart(packetsAcked)
 		if packetsAcked == 0 {
@@ -192,7 +290,7 @@ func (c *cubicState) fastConvergence() {
 	c.K = math.Cbrt(c.WMax * (1 - c.Beta) / c.C)
 }
 
-// PostRecovery implemements congestionControl.PostRecovery.
+// PostRecovery implements congestionControl.PostRecovery.
 func (c *cubicState) PostRecovery() {
 	c.T = c.s.ep.stack.Clock().NowMonotonic()
 }
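
The cubic.go changes above add HyStart with two parallel exit detectors, the ACK train and the delay-increase check described in the updateHyStart comment. The sketch below is a minimal standalone restatement of just the threshold arithmetic under the same constants (4–16 ms clamp, divisor 8, 8 samples); it works on plain time.Duration values and deliberately omits the real code's gating on ackDelta-spaced ACKs and on LastRTT being known, and it does not use cubicState or MonotonicTime.

```go
// A minimal sketch of the two HyStart exit checks added above, using the same
// constants. It operates on plain time.Duration values instead of gvisor's
// cubicState, so it is an illustration of the arithmetic only.
package main

import (
	"fmt"
	"time"
)

const (
	minRTTThresh  = 4 * time.Millisecond
	maxRTTThresh  = 16 * time.Millisecond
	minRTTDivisor = 8
	nRTTSample    = 8
)

// delayThreshold clamps lastRTT/minRTTDivisor into [minRTTThresh, maxRTTThresh].
func delayThreshold(lastRTT time.Duration) time.Duration {
	thresh := lastRTT / minRTTDivisor
	if thresh < minRTTThresh {
		thresh = minRTTThresh
	}
	if thresh > maxRTTThresh {
		thresh = maxRTTThresh
	}
	return thresh
}

// delayIncreaseExit reports whether the delay-increase detector would set
// ssthresh: enough RTT samples this round, and the round's minimum RTT grew
// by at least the clamped threshold over the previous round.
func delayIncreaseExit(samples int, currRTT, lastRTT time.Duration) bool {
	return samples >= nRTTSample && currRTT >= lastRTT+delayThreshold(lastRTT)
}

// ackTrainExit reports whether the ACK-train detector would set ssthresh:
// the train of closely spaced ACKs has lasted longer than half of the last
// round's RTT.
func ackTrainExit(sinceRoundStart, lastRTT time.Duration) bool {
	return sinceRoundStart > lastRTT/2
}

func main() {
	lastRTT := 40 * time.Millisecond
	fmt.Println(delayThreshold(lastRTT))                            // 40ms/8 = 5ms, inside the clamp
	fmt.Println(delayIncreaseExit(8, 46*time.Millisecond, lastRTT)) // 46ms >= 40ms+5ms -> true
	fmt.Println(ackTrainExit(25*time.Millisecond, lastRTT))         // 25ms > 20ms -> true
}
```

With LastRTT = 40 ms the clamp yields a 5 ms threshold, so a round whose minimum RTT reaches 45 ms, or an ACK train lasting longer than 20 ms, would set ssthresh to the current cwnd and end slow start.
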
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go
index b647b781..aeebbd64 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go
@@ -29,13 +29,15 @@ import (
 )
 
 // epQueue is a queue of endpoints.
+//
+// +stateify savable
 type epQueue struct {
-	mu   sync.Mutex
+	mu   sync.Mutex `state:"nosave"`
 	list endpointList
 }
 
 // enqueue adds e to the queue if the endpoint is not already on the queue.
-func (q *epQueue) enqueue(e *endpoint) {
+func (q *epQueue) enqueue(e *Endpoint) {
 	q.mu.Lock()
 	defer q.mu.Unlock()
 	e.pendingProcessingMu.Lock()
@@ -50,7 +52,7 @@ func (q *epQueue) enqueue(e *endpoint) {
 
 // dequeue removes and returns the first element from the queue if available,
 // returns nil otherwise.
-func (q *epQueue) dequeue() *endpoint {
+func (q *epQueue) dequeue() *Endpoint {
 	q.mu.Lock()
 	if e := q.list.Front(); e != nil {
 		q.list.Remove(e)
@@ -73,21 +75,24 @@ func (q *epQueue) empty() bool {
 }
 
 // processor is responsible for processing packets queued to a tcp endpoint.
+//
+// +stateify savable
 type processor struct {
-	epQ              epQueue
-	sleeper          sleep.Sleeper
-	newEndpointWaker sleep.Waker
-	closeWaker       sleep.Waker
-	pauseWaker       sleep.Waker
-	pauseChan        chan struct{}
-	resumeChan       chan struct{}
+	epQ     epQueue
+	sleeper sleep.Sleeper
+	// TODO(b/341946753): Restore them when netstack is savable.
+	newEndpointWaker sleep.Waker   `state:"nosave"`
+	closeWaker       sleep.Waker   `state:"nosave"`
+	pauseWaker       sleep.Waker   `state:"nosave"`
+	pauseChan        chan struct{} `state:"nosave"`
+	resumeChan       chan struct{} `state:"nosave"`
 }
 
 func (p *processor) close() {
 	p.closeWaker.Assert()
 }
 
-func (p *processor) queueEndpoint(ep *endpoint) {
+func (p *processor) queueEndpoint(ep *Endpoint) {
 	// Queue an endpoint for processing by the processor goroutine.
 	p.epQ.enqueue(ep)
 	p.newEndpointWaker.Assert()
@@ -97,7 +102,7 @@ func (p *processor) queueEndpoint(ep *endpoint) {
 // of its associated listening endpoint.
 //
 // +checklocks:ep.mu
-func deliverAccepted(ep *endpoint) bool {
+func deliverAccepted(ep *Endpoint) bool {
 	lEP := ep.h.listenEP
 	lEP.acceptMu.Lock()
 
@@ -129,7 +134,7 @@ func deliverAccepted(ep *endpoint) bool {
 
 // handleConnecting is responsible for TCP processing for an endpoint in one of
 // the connecting states.
-func (p *processor) handleConnecting(ep *endpoint) {
+func handleConnecting(ep *Endpoint) {
 	if !ep.TryLock() {
 		return
 	}
@@ -172,7 +177,7 @@ func (p *processor) handleConnecting(ep *endpoint) {
 
 // handleConnected is responsible for TCP processing for an endpoint in one of
 // the connected states(StateEstablished, StateFinWait1 etc.)
-func (p *processor) handleConnected(ep *endpoint) {
+func handleConnected(ep *Endpoint) {
 	if !ep.TryLock() {
 		return
 	}
@@ -200,7 +205,7 @@ func (p *processor) handleConnected(ep *endpoint) {
 		ep.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.ReadableEvents | waiter.WritableEvents)
 		return
 	case ep.EndpointState() == StateTimeWait:
-		p.startTimeWait(ep)
+		startTimeWait(ep)
 	}
 	ep.mu.Unlock()
 }
@@ -208,7 +213,7 @@ func (p *processor) handleConnected(ep *endpoint) {
 // startTimeWait starts a new goroutine to handle TIME-WAIT.
 //
 // +checklocks:ep.mu
-func (p *processor) startTimeWait(ep *endpoint) {
+func startTimeWait(ep *Endpoint) {
 	// Disable close timer as we are now entering real TIME_WAIT.
 	if ep.finWait2Timer != nil {
 		ep.finWait2Timer.Stop()
@@ -221,7 +226,7 @@ func (p *processor) startTimeWait(ep *endpoint) {
 
 // handleTimeWait is responsible for TCP processing for an endpoint in TIME-WAIT
 // state.
-func (p *processor) handleTimeWait(ep *endpoint) {
+func handleTimeWait(ep *Endpoint) {
 	if !ep.TryLock() {
 		return
 	}
@@ -251,7 +256,7 @@ func (p *processor) handleTimeWait(ep *endpoint) {
 
 // handleListen is responsible for TCP processing for an endpoint in LISTEN
 // state.
-func (p *processor) handleListen(ep *endpoint) {
+func handleListen(ep *Endpoint) {
 	if !ep.TryLock() {
 		return
 	}
@@ -307,13 +312,13 @@ func (p *processor) start(wg *sync.WaitGroup) {
 				}
 				switch state := ep.EndpointState(); {
 				case state.connecting():
-					p.handleConnecting(ep)
+					handleConnecting(ep)
 				case state.connected() && state != StateTimeWait:
-					p.handleConnected(ep)
+					handleConnected(ep)
 				case state == StateTimeWait:
-					p.handleTimeWait(ep)
+					handleTimeWait(ep)
 				case state == StateListen:
-					p.handleListen(ep)
+					handleListen(ep)
 				case state == StateError || state == StateClose:
 					// Try to redeliver any still queued
 					// packets to another endpoint or send a
@@ -355,11 +360,13 @@ func (p *processor) resume() {
 // goroutines do full tcp processing. The processor is selected based on the
 // hash of the endpoint id to ensure that delivery for the same endpoint happens
 // in-order.
+//
+// +stateify savable
 type dispatcher struct {
 	processors []processor
-	wg         sync.WaitGroup
+	wg         sync.WaitGroup `state:"nosave"`
 	hasher     jenkinsHasher
-	mu         sync.Mutex
+	mu         sync.Mutex `state:"nosave"`
 	// +checklocks:mu
 	paused bool
 	// +checklocks:mu
@@ -409,7 +416,7 @@ func (d *dispatcher) wait() {
 
 // queuePacket queues an incoming packet to the matching tcp endpoint and
 // also queues the endpoint to a processor queue for processing.
-func (d *dispatcher) queuePacket(stackEP stack.TransportEndpoint, id stack.TransportEndpointID, clock tcpip.Clock, pkt stack.PacketBufferPtr) {
+func (d *dispatcher) queuePacket(stackEP stack.TransportEndpoint, id stack.TransportEndpointID, clock tcpip.Clock, pkt *stack.PacketBuffer) {
 	d.mu.Lock()
 	closed := d.closed
 	d.mu.Unlock()
@@ -418,7 +425,7 @@ func (d *dispatcher) queuePacket(stackEP stack.TransportEndpoint, id stack.Trans
 		return
 	}
 
-	ep := stackEP.(*endpoint)
+	ep := stackEP.(*Endpoint)
 
 	s, err := newIncomingSegment(id, clock, pkt)
 	if err != nil {
@@ -491,6 +498,8 @@ func (d *dispatcher) resume() {
 }
 
 // jenkinsHasher contains state needed to for a jenkins hash.
+//
+// +stateify savable
 type jenkinsHasher struct {
 	seed uint32
 }
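
The dispatcher comment above explains that each endpoint is hashed to a fixed processor so that segments for one connection are always handled, in order, by the same goroutine. The sketch below illustrates that idea in isolation; transportID is a stand-in for stack.TransportEndpointID and the standard-library FNV hash replaces the seeded jenkinsHasher used here, so this is an analogy rather than the actual selection code.

```go
// Illustrative only: selecting a processor by hashing the connection 4-tuple.
// transportID is a made-up stand-in for stack.TransportEndpointID, and FNV
// replaces gvisor's seeded jenkins hasher; the point is only that the same
// tuple always maps to the same processor index.
package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

type transportID struct {
	localAddr  string
	localPort  uint16
	remoteAddr string
	remotePort uint16
}

// processorIndex maps a 4-tuple to one of numProcessors queues.
func processorIndex(id transportID, numProcessors int) int {
	h := fnv.New32a()
	var port [2]byte
	h.Write([]byte(id.localAddr))
	binary.LittleEndian.PutUint16(port[:], id.localPort)
	h.Write(port[:])
	h.Write([]byte(id.remoteAddr))
	binary.LittleEndian.PutUint16(port[:], id.remotePort)
	h.Write(port[:])
	return int(h.Sum32() % uint32(numProcessors))
}

func main() {
	id := transportID{"10.0.0.1", 443, "10.0.0.2", 50123}
	// The same connection always lands on the same processor queue.
	fmt.Println(processorIndex(id, 8) == processorIndex(id, 8)) // true
}
```

Because the index depends only on the connection tuple, all segments for a given endpoint are serialized through one processor goroutine, which is what keeps their processing in order.
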
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go
index 7b0af175..5cd028b4 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go
@@ -16,7 +16,6 @@ package tcp
 
 import (
 	"container/heap"
-	"encoding/binary"
 	"fmt"
 	"io"
 	"math"
@@ -29,7 +28,6 @@ import (
 	"gvisor.dev/gvisor/pkg/sleep"
 	"gvisor.dev/gvisor/pkg/sync"
 	"gvisor.dev/gvisor/pkg/tcpip"
-	"gvisor.dev/gvisor/pkg/tcpip/hash/jenkins"
 	"gvisor.dev/gvisor/pkg/tcpip/header"
 	"gvisor.dev/gvisor/pkg/tcpip/ports"
 	"gvisor.dev/gvisor/pkg/tcpip/seqnum"
@@ -305,7 +303,7 @@ func (sq *sndQueueInfo) CloneState(other *stack.TCPSndBufState) {
 	other.AutoTuneSndBufDisabled = atomicbitops.FromUint32(sq.AutoTuneSndBufDisabled.RacyLoad())
 }
 
-// endpoint represents a TCP endpoint. This struct serves as the interface
+// Endpoint represents a TCP endpoint. This struct serves as the interface
 // between users of the endpoint and the protocol implementation; it is legal to
 // have concurrent goroutines make calls into the endpoint, they are properly
 // synchronized. The protocol implementation, however, runs in a single
@@ -345,12 +343,12 @@ func (sq *sndQueueInfo) CloneState(other *stack.TCPSndBufState) {
 // e.LockUser/e.UnlockUser methods.
 //
 // +stateify savable
-type endpoint struct {
+type Endpoint struct {
 	stack.TCPEndpointStateInner
 	stack.TransportEndpointInfo
 	tcpip.DefaultSocketOptionsHandler
 
-	// endpointEntry is used to queue endpoints for processing to the
+	// EndpointEntry is used to queue endpoints for processing to
 	// a given tcp processor goroutine.
 	//
 	// Precondition: epQueue.mu must be held to read/write this field..
@@ -369,7 +367,6 @@ type endpoint struct {
 	stack       *stack.Stack  `state:"manual"`
 	protocol    *protocol     `state:"manual"`
 	waiterQueue *waiter.Queue `state:"wait"`
-	uniqueID    uint64
 
 	// hardError is meaningful only when state is stateError. It stores the
 	// error to be returned when read/write syscalls are called and the
@@ -597,11 +594,16 @@ type endpoint struct {
 	// listenCtx is used by listening endpoints to store state used while listening for
 	// connections. Nil otherwise.
 	listenCtx *listenContext `state:"nosave"`
-}
 
-// UniqueID implements stack.TransportEndpoint.UniqueID.
-func (e *endpoint) UniqueID() uint64 {
-	return e.uniqueID
+	// limRdr is reused to avoid allocations.
+	//
+	// +checklocks:mu
+	limRdr *io.LimitedReader `state:"nosave"`
+
+	// pmtud is the PMTUD strategy to use.
+	//
+	// +checklocks:mu
+	pmtud tcpip.PMTUDStrategy
 }
 
 // calculateAdvertisedMSS calculates the MSS to advertise.
@@ -622,7 +624,7 @@ func calculateAdvertisedMSS(userMSS uint16, r *stack.Route) uint16 {
 
 // isOwnedByUser() returns true if the endpoint lock is currently
 // held by a user(syscall) goroutine.
-func (e *endpoint) isOwnedByUser() bool {
+func (e *Endpoint) isOwnedByUser() bool {
 	return e.ownedByUser.Load() == 1
 }
 
@@ -636,7 +638,7 @@ func (e *endpoint) isOwnedByUser() bool {
 // should not be holding the lock for long and spinning reduces latency as we
 // avoid an expensive sleep/wakeup of the syscall goroutine).
 // +checklocksacquire:e.mu
-func (e *endpoint) LockUser() {
+func (e *Endpoint) LockUser() {
 	const iterations = 5
 	for i := 0; i < iterations; i++ {
 		// Try first if the sock is locked then check if it's owned
@@ -691,7 +693,7 @@ func (e *endpoint) LockUser() {
 //
 // Precondition: e.LockUser() must have been called before calling e.UnlockUser()
 // +checklocksrelease:e.mu
-func (e *endpoint) UnlockUser() {
+func (e *Endpoint) UnlockUser() {
 	// Lock segment queue before checking so that we avoid a race where
 	// segments can be queued between the time we check if queue is empty
 	// and actually unlock the endpoint mutex.
@@ -724,13 +726,13 @@ func (e *endpoint) UnlockUser() {
 
 // StopWork halts packet processing. Only to be used in tests.
 // +checklocksacquire:e.mu
-func (e *endpoint) StopWork() {
+func (e *Endpoint) StopWork() {
 	e.mu.Lock()
 }
 
 // ResumeWork resumes packet processing. Only to be used in tests.
 // +checklocksrelease:e.mu
-func (e *endpoint) ResumeWork() {
+func (e *Endpoint) ResumeWork() {
 	e.mu.Unlock()
 }
 
@@ -743,7 +745,7 @@ func (e *endpoint) ResumeWork() {
 // variable locks.
 // +checklocks:locked.mu
 // +checklocksacquire:e.mu
-func (e *endpoint) AssertLockHeld(locked *endpoint) {
+func (e *Endpoint) AssertLockHeld(locked *Endpoint) {
 	if e != locked {
 		panic("AssertLockHeld failed: locked endpoint != asserting endpoint")
 	}
@@ -753,7 +755,7 @@ func (e *endpoint) AssertLockHeld(locked *endpoint) {
 // adds the necessary checklocks annotations.
 // TODO(b/226403629): Remove this once checklocks understands TryLock.
 // +checklocksacquire:e.mu
-func (e *endpoint) TryLock() bool {
+func (e *Endpoint) TryLock() bool {
 	if e.mu.TryLock() {
 		return true // +checklocksforce
 	}
@@ -765,7 +767,7 @@ func (e *endpoint) TryLock() bool {
 // package but we allow the state to be read freely without holding e.mu.
 //
 // +checklocks:e.mu
-func (e *endpoint) setEndpointState(state EndpointState) {
+func (e *Endpoint) setEndpointState(state EndpointState) {
 	oldstate := EndpointState(e.state.Swap(uint32(state)))
 	switch state {
 	case StateEstablished:
@@ -789,18 +791,18 @@ func (e *endpoint) setEndpointState(state EndpointState) {
 }
 
 // EndpointState returns the current state of the endpoint.
-func (e *endpoint) EndpointState() EndpointState {
+func (e *Endpoint) EndpointState() EndpointState {
 	return EndpointState(e.state.Load())
 }
 
 // setRecentTimestamp sets the recentTS field to the provided value.
-func (e *endpoint) setRecentTimestamp(recentTS uint32) {
+func (e *Endpoint) setRecentTimestamp(recentTS uint32) {
 	e.RecentTS = recentTS
 	e.recentTSTime = e.stack.Clock().NowMonotonic()
 }
 
 // recentTimestamp returns the value of the recentTS field.
-func (e *endpoint) recentTimestamp() uint32 {
+func (e *Endpoint) recentTimestamp() uint32 {
 	return e.RecentTS
 }
 
@@ -838,8 +840,8 @@ type keepalive struct {
 	waker sleep.Waker `state:"nosave"`
 }
 
-func newEndpoint(s *stack.Stack, protocol *protocol, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) *endpoint {
-	e := &endpoint{
+func newEndpoint(s *stack.Stack, protocol *protocol, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) *Endpoint {
+	e := &Endpoint{
 		stack:    s,
 		protocol: protocol,
 		TransportEndpointInfo: stack.TransportEndpointInfo{
@@ -858,12 +860,14 @@ func newEndpoint(s *stack.Stack, protocol *protocol, netProto tcpip.NetworkProto
 			interval: DefaultKeepaliveInterval,
 			count:    DefaultKeepaliveCount,
 		},
-		uniqueID:      s.UniqueID(),
-		ipv4TTL:       tcpip.UseDefaultIPv4TTL,
-		ipv6HopLimit:  tcpip.UseDefaultIPv6HopLimit,
-		txHash:        s.Rand().Uint32(),
+		ipv4TTL:      tcpip.UseDefaultIPv4TTL,
+		ipv6HopLimit: tcpip.UseDefaultIPv6HopLimit,
+		// txHash only determines which outgoing queue to use, so
+		// InsecureRNG is fine.
+		txHash:        s.InsecureRNG().Uint32(),
 		windowClamp:   DefaultReceiveBufferSize,
 		maxSynRetries: DefaultSynRetries,
+		limRdr:        &io.LimitedReader{},
 	}
 	e.ops.InitHandler(e, e.stack, GetTCPSendBufferLimits, GetTCPReceiveBufferLimits)
 	e.ops.SetMulticastLoop(true)
@@ -914,14 +918,14 @@ func newEndpoint(s *stack.Stack, protocol *protocol, netProto tcpip.NetworkProto
 
 	// TODO(https://gvisor.dev/issues/7493): Defer creating the timer until TCP connection becomes
 	// established.
-	e.keepalive.timer.init(e.stack.Clock(), maybeFailTimerHandler(e, e.keepaliveTimerExpired))
+	e.keepalive.timer.init(e.stack.Clock(), timerHandler(e, e.keepaliveTimerExpired))
 
 	return e
 }
 
 // Readiness returns the current readiness of the endpoint. For example, if
 // waiter.EventIn is set, the endpoint is immediately readable.
-func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {
+func (e *Endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {
 	result := waiter.EventMask(0)
 
 	switch e.EndpointState() {
@@ -983,7 +987,7 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {
 }
 
 // Purging pending rcv segments is only necessary on RST.
-func (e *endpoint) purgePendingRcvQueue() {
+func (e *Endpoint) purgePendingRcvQueue() {
 	if e.rcv != nil {
 		for e.rcv.pendingRcvdSegments.Len() > 0 {
 			s := heap.Pop(&e.rcv.pendingRcvdSegments).(*segment)
@@ -993,7 +997,7 @@ func (e *endpoint) purgePendingRcvQueue() {
 }
 
 // +checklocks:e.mu
-func (e *endpoint) purgeReadQueue() {
+func (e *Endpoint) purgeReadQueue() {
 	if e.rcv != nil {
 		e.rcvQueueMu.Lock()
 		defer e.rcvQueueMu.Unlock()
@@ -1010,7 +1014,7 @@ func (e *endpoint) purgeReadQueue() {
 }
 
 // +checklocks:e.mu
-func (e *endpoint) purgeWriteQueue() {
+func (e *Endpoint) purgeWriteQueue() {
 	if e.snd != nil {
 		e.sndQueueInfo.sndQueueMu.Lock()
 		defer e.sndQueueInfo.sndQueueMu.Unlock()
@@ -1029,7 +1033,7 @@ func (e *endpoint) purgeWriteQueue() {
 }
 
 // Abort implements stack.TransportEndpoint.Abort.
-func (e *endpoint) Abort() {
+func (e *Endpoint) Abort() {
 	defer e.drainClosingSegmentQueue()
 	e.LockUser()
 	defer e.UnlockUser()
@@ -1047,7 +1051,7 @@ func (e *endpoint) Abort() {
 // Close puts the endpoint in a closed state and frees all resources associated
 // with it. It must be called only once and with no other concurrent calls to
 // the endpoint.
-func (e *endpoint) Close() {
+func (e *Endpoint) Close() {
 	e.LockUser()
 	if e.closed {
 		e.UnlockUser()
@@ -1071,7 +1075,7 @@ func (e *endpoint) Close() {
 }
 
 // +checklocks:e.mu
-func (e *endpoint) closeLocked() {
+func (e *Endpoint) closeLocked() {
 	linger := e.SocketOptions().GetLinger()
 	if linger.Enabled && linger.Timeout == 0 {
 		s := e.EndpointState()
@@ -1092,7 +1096,7 @@ func (e *endpoint) closeLocked() {
 
 // closeNoShutdown closes the endpoint without doing a full shutdown.
 // +checklocks:e.mu
-func (e *endpoint) closeNoShutdownLocked() {
+func (e *Endpoint) closeNoShutdownLocked() {
 	// For listening sockets, we always release ports inline so that they
 	// are immediately available for reuse after Close() is called. If also
 	// registered, we unregister as well otherwise the next user would fail
@@ -1152,15 +1156,15 @@ func (e *endpoint) closeNoShutdownLocked() {
 
 // closePendingAcceptableConnections closes all connections that have completed
 // handshake but not yet been delivered to the application.
-func (e *endpoint) closePendingAcceptableConnectionsLocked() {
+func (e *Endpoint) closePendingAcceptableConnectionsLocked() {
 	e.acceptMu.Lock()
 
 	pendingEndpoints := e.acceptQueue.pendingEndpoints
 	e.acceptQueue.pendingEndpoints = nil
 
-	completedEndpoints := make([]*endpoint, 0, e.acceptQueue.endpoints.Len())
+	completedEndpoints := make([]*Endpoint, 0, e.acceptQueue.endpoints.Len())
 	for n := e.acceptQueue.endpoints.Front(); n != nil; n = n.Next() {
-		completedEndpoints = append(completedEndpoints, n.Value.(*endpoint))
+		completedEndpoints = append(completedEndpoints, n.Value.(*Endpoint))
 	}
 	e.acceptQueue.endpoints.Init()
 	e.acceptQueue.capacity = 0
@@ -1179,11 +1183,12 @@ func (e *endpoint) closePendingAcceptableConnectionsLocked() {
 
 // cleanupLocked frees all resources associated with the endpoint.
 // +checklocks:e.mu
-func (e *endpoint) cleanupLocked() {
+func (e *Endpoint) cleanupLocked() {
 	if e.snd != nil {
 		e.snd.resendTimer.cleanup()
 		e.snd.probeTimer.cleanup()
 		e.snd.reorderTimer.cleanup()
+		e.snd.corkTimer.cleanup()
 	}
 
 	if e.finWait2Timer != nil {
@@ -1244,7 +1249,7 @@ func wndFromSpace(space int) int {
 
 // initialReceiveWindow returns the initial receive window to advertise in the
 // SYN/SYN-ACK.
-func (e *endpoint) initialReceiveWindow() int {
+func (e *Endpoint) initialReceiveWindow() int {
 	rcvWnd := wndFromSpace(e.receiveBufferAvailable())
 	if rcvWnd > math.MaxUint16 {
 		rcvWnd = math.MaxUint16
@@ -1273,7 +1278,7 @@ func (e *endpoint) initialReceiveWindow() int {
 
 // ModerateRecvBuf adjusts the receive buffer and the advertised window
 // based on the number of bytes copied to userspace.
-func (e *endpoint) ModerateRecvBuf(copied int) {
+func (e *Endpoint) ModerateRecvBuf(copied int) {
 	e.LockUser()
 	defer e.UnlockUser()
 
@@ -1351,19 +1356,19 @@ func (e *endpoint) ModerateRecvBuf(copied int) {
 }
 
 // SetOwner implements tcpip.Endpoint.SetOwner.
-func (e *endpoint) SetOwner(owner tcpip.PacketOwner) {
+func (e *Endpoint) SetOwner(owner tcpip.PacketOwner) {
 	e.owner = owner
 }
 
 // +checklocks:e.mu
-func (e *endpoint) hardErrorLocked() tcpip.Error {
+func (e *Endpoint) hardErrorLocked() tcpip.Error {
 	err := e.hardError
 	e.hardError = nil
 	return err
 }
 
 // +checklocks:e.mu
-func (e *endpoint) lastErrorLocked() tcpip.Error {
+func (e *Endpoint) lastErrorLocked() tcpip.Error {
 	e.lastErrorMu.Lock()
 	defer e.lastErrorMu.Unlock()
 	err := e.lastError
@@ -1372,7 +1377,7 @@ func (e *endpoint) lastErrorLocked() tcpip.Error {
 }
 
 // LastError implements tcpip.Endpoint.LastError.
-func (e *endpoint) LastError() tcpip.Error {
+func (e *Endpoint) LastError() tcpip.Error {
 	e.LockUser()
 	defer e.UnlockUser()
 	if err := e.hardErrorLocked(); err != nil {
@@ -1384,12 +1389,12 @@ func (e *endpoint) LastError() tcpip.Error {
 // LastErrorLocked reads and clears lastError.
 // Only to be used in tests.
 // +checklocks:e.mu
-func (e *endpoint) LastErrorLocked() tcpip.Error {
+func (e *Endpoint) LastErrorLocked() tcpip.Error {
 	return e.lastErrorLocked()
 }
 
 // UpdateLastError implements tcpip.SocketOptionsHandler.UpdateLastError.
-func (e *endpoint) UpdateLastError(err tcpip.Error) {
+func (e *Endpoint) UpdateLastError(err tcpip.Error) {
 	e.LockUser()
 	e.lastErrorMu.Lock()
 	e.lastError = err
@@ -1398,7 +1403,7 @@ func (e *endpoint) UpdateLastError(err tcpip.Error) {
 }
 
 // Read implements tcpip.Endpoint.Read.
-func (e *endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResult, tcpip.Error) {
+func (e *Endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResult, tcpip.Error) {
 	e.LockUser()
 	defer e.UnlockUser()
 
@@ -1476,7 +1481,7 @@ func (e *endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResult
 // checkRead checks that endpoint is in a readable state.
 //
 // +checklocks:e.mu
-func (e *endpoint) checkReadLocked() tcpip.Error {
+func (e *Endpoint) checkReadLocked() tcpip.Error {
 	e.rcvQueueMu.Lock()
 	defer e.rcvQueueMu.Unlock()
 	// When in SYN-SENT state, let the caller block on the receive.
@@ -1519,7 +1524,7 @@ func (e *endpoint) checkReadLocked() tcpip.Error {
 // indicating the reason why it's not writable.
 // +checklocks:e.mu
 // +checklocks:e.sndQueueInfo.sndQueueMu
-func (e *endpoint) isEndpointWritableLocked() (int, tcpip.Error) {
+func (e *Endpoint) isEndpointWritableLocked() (int, tcpip.Error) {
 	// The endpoint cannot be written to if it's not connected.
 	switch s := e.EndpointState(); {
 	case s == StateError:
@@ -1553,13 +1558,19 @@ func (e *endpoint) isEndpointWritableLocked() (int, tcpip.Error) {
 // readFromPayloader reads a slice from the Payloader.
 // +checklocks:e.mu
 // +checklocks:e.sndQueueInfo.sndQueueMu
-func (e *endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions, avail int) (buffer.Buffer, tcpip.Error) {
+func (e *Endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions, avail int) (buffer.Buffer, tcpip.Error) {
 	// We can release locks while copying data.
 	//
 	// This is not possible if atomic is set, because we can't allow the
 	// available buffer space to be consumed by some other caller while we
 	// are copying data in.
+	limRdr := e.limRdr
 	if !opts.Atomic {
+		defer func() {
+			e.limRdr = limRdr
+		}()
+		e.limRdr = nil
+
 		e.sndQueueInfo.sndQueueMu.Unlock()
 		defer e.sndQueueInfo.sndQueueMu.Lock()
 
@@ -1575,7 +1586,7 @@ func (e *endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions,
 	if avail == 0 {
 		return payload, nil
 	}
-	if _, err := payload.WriteFromReader(p, int64(avail)); err != nil {
+	if _, err := payload.WriteFromReaderAndLimitedReader(p, int64(avail), limRdr); err != nil {
 		payload.Release()
 		return buffer.Buffer{}, &tcpip.ErrBadBuffer{}
 	}
@@ -1584,7 +1595,7 @@ func (e *endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions,
 
 // queueSegment reads data from the payloader and returns a segment to be sent.
 // +checklocks:e.mu
-func (e *endpoint) queueSegment(p tcpip.Payloader, opts tcpip.WriteOptions) (*segment, int, tcpip.Error) {
+func (e *Endpoint) queueSegment(p tcpip.Payloader, opts tcpip.WriteOptions) (*segment, int, tcpip.Error) {
 	e.sndQueueInfo.sndQueueMu.Lock()
 	defer e.sndQueueInfo.sndQueueMu.Unlock()
 
@@ -1632,7 +1643,7 @@ func (e *endpoint) queueSegment(p tcpip.Payloader, opts tcpip.WriteOptions) (*se
 }
 
 // Write writes data to the endpoint's peer.
-func (e *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcpip.Error) {
+func (e *Endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcpip.Error) {
 	// Linux completely ignores any address passed to sendto(2) for TCP sockets
 	// (without the MSG_FASTOPEN flag). Corking is unimplemented, so opts.More
 	// and opts.EndOfRecord are also ignored.
@@ -1655,7 +1666,7 @@ func (e *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp
 // applied.
 // +checklocks:e.mu
 // +checklocks:e.rcvQueueMu
-func (e *endpoint) selectWindowLocked(rcvBufSize int) (wnd seqnum.Size) {
+func (e *Endpoint) selectWindowLocked(rcvBufSize int) (wnd seqnum.Size) {
 	wndFromAvailable := wndFromSpace(e.receiveBufferAvailableLocked(rcvBufSize))
 	maxWindow := wndFromSpace(rcvBufSize)
 	wndFromUsedBytes := maxWindow - e.RcvBufUsed
@@ -1678,7 +1689,7 @@ func (e *endpoint) selectWindowLocked(rcvBufSize int) (wnd seqnum.Size) {
 
 // selectWindow invokes selectWindowLocked after acquiring e.rcvQueueMu.
 // +checklocks:e.mu
-func (e *endpoint) selectWindow() (wnd seqnum.Size) {
+func (e *Endpoint) selectWindow() (wnd seqnum.Size) {
 	e.rcvQueueMu.Lock()
 	wnd = e.selectWindowLocked(int(e.ops.GetReceiveBufferSize()))
 	e.rcvQueueMu.Unlock()
@@ -1701,7 +1712,7 @@ func (e *endpoint) selectWindow() (wnd seqnum.Size) {
 //
 // +checklocks:e.mu
 // +checklocks:e.rcvQueueMu
-func (e *endpoint) windowCrossedACKThresholdLocked(deltaBefore int, rcvBufSize int) (crossed bool, above bool) {
+func (e *Endpoint) windowCrossedACKThresholdLocked(deltaBefore int, rcvBufSize int) (crossed bool, above bool) {
 	newAvail := int(e.selectWindowLocked(rcvBufSize))
 	oldAvail := newAvail - deltaBefore
 	if oldAvail < 0 {
@@ -1725,28 +1736,28 @@ func (e *endpoint) windowCrossedACKThresholdLocked(deltaBefore int, rcvBufSize i
 }
 
 // OnReuseAddressSet implements tcpip.SocketOptionsHandler.OnReuseAddressSet.
-func (e *endpoint) OnReuseAddressSet(v bool) {
+func (e *Endpoint) OnReuseAddressSet(v bool) {
 	e.LockUser()
 	e.portFlags.TupleOnly = v
 	e.UnlockUser()
 }
 
 // OnReusePortSet implements tcpip.SocketOptionsHandler.OnReusePortSet.
-func (e *endpoint) OnReusePortSet(v bool) {
+func (e *Endpoint) OnReusePortSet(v bool) {
 	e.LockUser()
 	e.portFlags.LoadBalanced = v
 	e.UnlockUser()
 }
 
 // OnKeepAliveSet implements tcpip.SocketOptionsHandler.OnKeepAliveSet.
-func (e *endpoint) OnKeepAliveSet(bool) {
+func (e *Endpoint) OnKeepAliveSet(bool) {
 	e.LockUser()
 	e.resetKeepaliveTimer(true /* receivedData */)
 	e.UnlockUser()
 }
 
 // OnDelayOptionSet implements tcpip.SocketOptionsHandler.OnDelayOptionSet.
-func (e *endpoint) OnDelayOptionSet(v bool) {
+func (e *Endpoint) OnDelayOptionSet(v bool) {
 	if !v {
 		e.LockUser()
 		defer e.UnlockUser()
@@ -1758,10 +1769,13 @@ func (e *endpoint) OnDelayOptionSet(v bool) {
 }
 
 // OnCorkOptionSet implements tcpip.SocketOptionsHandler.OnCorkOptionSet.
-func (e *endpoint) OnCorkOptionSet(v bool) {
+func (e *Endpoint) OnCorkOptionSet(v bool) {
 	if !v {
 		e.LockUser()
 		defer e.UnlockUser()
+		if e.snd != nil {
+			e.snd.corkTimer.disable()
+		}
 		// Handle the corked data.
 		if e.EndpointState().connected() {
 			e.sendData(nil /* next */)
@@ -1769,12 +1783,12 @@ func (e *endpoint) OnCorkOptionSet(v bool) {
 	}
 }
 
-func (e *endpoint) getSendBufferSize() int {
+func (e *Endpoint) getSendBufferSize() int {
 	return int(e.ops.GetSendBufferSize())
 }
 
 // OnSetReceiveBufferSize implements tcpip.SocketOptionsHandler.OnSetReceiveBufferSize.
-func (e *endpoint) OnSetReceiveBufferSize(rcvBufSz, oldSz int64) (newSz int64, postSet func()) {
+func (e *Endpoint) OnSetReceiveBufferSize(rcvBufSz, oldSz int64) (newSz int64, postSet func()) {
 	e.LockUser()
 
 	sendNonZeroWindowUpdate := false
@@ -1816,13 +1830,13 @@ func (e *endpoint) OnSetReceiveBufferSize(rcvBufSz, oldSz int64) (newSz int64, p
 }
 
 // OnSetSendBufferSize implements tcpip.SocketOptionsHandler.OnSetSendBufferSize.
-func (e *endpoint) OnSetSendBufferSize(sz int64) int64 {
+func (e *Endpoint) OnSetSendBufferSize(sz int64) int64 {
 	e.sndQueueInfo.TCPSndBufState.AutoTuneSndBufDisabled.Store(1)
 	return sz
 }
 
 // WakeupWriters implements tcpip.SocketOptionsHandler.WakeupWriters.
-func (e *endpoint) WakeupWriters() {
+func (e *Endpoint) WakeupWriters() {
 	e.LockUser()
 	defer e.UnlockUser()
 
@@ -1837,7 +1851,7 @@ func (e *endpoint) WakeupWriters() {
 }
 
 // SetSockOptInt sets a socket option.
-func (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error {
+func (e *Endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error {
 	// Lower 2 bits represents ECN bits. RFC 3168, section 23.1
 	const inetECNMask = 3
 
@@ -1874,9 +1888,16 @@ func (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error {
 		e.UnlockUser()
 
 	case tcpip.MTUDiscoverOption:
-		// Return not supported if attempting to set this option to
-		// anything other than path MTU discovery disabled.
-		if v != tcpip.PMTUDiscoveryDont {
+		switch v := tcpip.PMTUDStrategy(v); v {
+		case tcpip.PMTUDiscoveryWant, tcpip.PMTUDiscoveryDont, tcpip.PMTUDiscoveryDo:
+			e.LockUser()
+			e.pmtud = v
+			e.UnlockUser()
+		case tcpip.PMTUDiscoveryProbe:
+			// We don't support a way to ignore MTU updates; it's
+			// either on or it's off.
+			return &tcpip.ErrNotSupported{}
+		default:
 			return &tcpip.ErrNotSupported{}
 		}
 
@@ -1924,12 +1945,13 @@ func (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error {
 	return nil
 }
 
-func (e *endpoint) HasNIC(id int32) bool {
+// HasNIC returns true if the NICID is defined in the stack or id is 0.
+func (e *Endpoint) HasNIC(id int32) bool {
 	return id == 0 || e.stack.HasNIC(tcpip.NICID(id))
 }
 
 // SetSockOpt sets a socket option.
-func (e *endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error {
+func (e *Endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error {
 	switch v := opt.(type) {
 	case *tcpip.KeepaliveIdleOption:
 		e.LockUser()
@@ -2022,7 +2044,7 @@ func (e *endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error {
 }
 
 // readyReceiveSize returns the number of bytes ready to be received.
-func (e *endpoint) readyReceiveSize() (int, tcpip.Error) {
+func (e *Endpoint) readyReceiveSize() (int, tcpip.Error) {
 	e.LockUser()
 	defer e.UnlockUser()
 
@@ -2038,7 +2060,7 @@ func (e *endpoint) readyReceiveSize() (int, tcpip.Error) {
 }
 
 // GetSockOptInt implements tcpip.Endpoint.GetSockOptInt.
-func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {
+func (e *Endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {
 	switch opt {
 	case tcpip.KeepaliveCountOption:
 		e.keepalive.Lock()
@@ -2072,9 +2094,10 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {
 		return v, nil
 
 	case tcpip.MTUDiscoverOption:
-		// Always return the path MTU discovery disabled setting since
-		// it's the only one supported.
-		return tcpip.PMTUDiscoveryDont, nil
+		e.LockUser()
+		v := e.pmtud
+		e.UnlockUser()
+		return int(v), nil
 
 	case tcpip.ReceiveQueueSizeOption:
 		return e.readyReceiveSize()
@@ -2111,7 +2134,7 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) {
 	}
 }
 
-func (e *endpoint) getTCPInfo() tcpip.TCPInfoOption {
+func (e *Endpoint) getTCPInfo() tcpip.TCPInfoOption {
 	info := tcpip.TCPInfoOption{}
 	e.LockUser()
 	if state := e.EndpointState(); state.internal() {
@@ -2140,7 +2163,7 @@ func (e *endpoint) getTCPInfo() tcpip.TCPInfoOption {
 }
 
 // GetSockOpt implements tcpip.Endpoint.GetSockOpt.
-func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error {
+func (e *Endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error {
 	switch o := opt.(type) {
 	case *tcpip.TCPInfoOption:
 		*o = e.getTCPInfo()
@@ -2197,8 +2220,8 @@ func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error {
 // checkV4MappedLocked determines the effective network protocol and converts
 // addr to its canonical form.
 // +checklocks:e.mu
-func (e *endpoint) checkV4MappedLocked(addr tcpip.FullAddress) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) {
-	unwrapped, netProto, err := e.TransportEndpointInfo.AddrNetProtoLocked(addr, e.ops.GetV6Only())
+func (e *Endpoint) checkV4MappedLocked(addr tcpip.FullAddress, bind bool) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) {
+	unwrapped, netProto, err := e.TransportEndpointInfo.AddrNetProtoLocked(addr, e.ops.GetV6Only(), bind)
 	if err != nil {
 		return tcpip.FullAddress{}, 0, err
 	}
@@ -2206,12 +2229,12 @@ func (e *endpoint) checkV4MappedLocked(addr tcpip.FullAddress) (tcpip.FullAddres
 }
 
 // Disconnect implements tcpip.Endpoint.Disconnect.
-func (*endpoint) Disconnect() tcpip.Error {
+func (*Endpoint) Disconnect() tcpip.Error {
 	return &tcpip.ErrNotSupported{}
 }
 
 // Connect connects the endpoint to its peer.
-func (e *endpoint) Connect(addr tcpip.FullAddress) tcpip.Error {
+func (e *Endpoint) Connect(addr tcpip.FullAddress) tcpip.Error {
 	e.LockUser()
 	defer e.UnlockUser()
 	err := e.connect(addr, true)
@@ -2229,7 +2252,7 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) tcpip.Error {
 // registerEndpoint registers the endpoint with the provided address.
 //
 // +checklocks:e.mu
-func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.NetworkProtocolNumber, nicID tcpip.NICID) tcpip.Error {
+func (e *Endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.NetworkProtocolNumber, nicID tcpip.NICID) tcpip.Error {
 	netProtos := []tcpip.NetworkProtocolNumber{netProto}
 	if e.TransportEndpointInfo.ID.LocalPort != 0 {
 		// The endpoint is bound to a port, attempt to register it.
@@ -2244,28 +2267,6 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo
 		// endpoint would be trying to connect to itself).
 		sameAddr := e.TransportEndpointInfo.ID.LocalAddress == e.TransportEndpointInfo.ID.RemoteAddress
 
-		// Calculate a port offset based on the destination IP/port and
-		// src IP to ensure that for a given tuple (srcIP, destIP,
-		// destPort) the offset used as a starting point is the same to
-		// ensure that we can cycle through the port space effectively.
-		portBuf := make([]byte, 2)
-		binary.LittleEndian.PutUint16(portBuf, e.ID.RemotePort)
-
-		h := jenkins.Sum32(e.protocol.portOffsetSecret)
-		for _, s := range [][]byte{
-			e.ID.LocalAddress.AsSlice(),
-			e.ID.RemoteAddress.AsSlice(),
-			portBuf,
-		} {
-			// Per io.Writer.Write:
-			//
-			// Write must return a non-nil error if it returns n < len(p).
-			if _, err := h.Write(s); err != nil {
-				panic(err)
-			}
-		}
-		portOffset := h.Sum32()
-
 		var twReuse tcpip.TCPTimeWaitReuseOption
 		if err := e.stack.TransportProtocolOption(ProtocolNumber, &twReuse); err != nil {
 			panic(fmt.Sprintf("e.stack.TransportProtocolOption(%d, %#v) = %s", ProtocolNumber, &twReuse, err))
@@ -2282,7 +2283,7 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo
 		}
 
 		bindToDevice := tcpip.NICID(e.ops.GetBindToDevice())
-		if _, err := e.stack.PickEphemeralPortStable(portOffset, func(p uint16) (bool, tcpip.Error) {
+		if _, err := e.stack.PickEphemeralPort(e.stack.SecureRNG(), func(p uint16) (bool, tcpip.Error) {
 			if sameAddr && p == e.TransportEndpointInfo.ID.RemotePort {
 				return false, nil
 			}
@@ -2295,7 +2296,7 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo
 				BindToDevice: bindToDevice,
 				Dest:         addr,
 			}
-			if _, err := e.stack.ReservePort(e.stack.Rand(), portRes, nil /* testPort */); err != nil {
+			if _, err := e.stack.ReservePort(e.stack.SecureRNG(), portRes, nil /* testPort */); err != nil {
 				if _, ok := err.(*tcpip.ErrPortInUse); !ok || !reuse {
 					return false, nil
 				}
@@ -2316,7 +2317,7 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo
 					return false, nil
 				}
 
-				tcpEP := transEP.(*endpoint)
+				tcpEP := transEP.(*Endpoint)
 				tcpEP.LockUser()
 				// If the endpoint is not in TIME-WAIT or if it is in TIME-WAIT but
 				// less than 1 second has elapsed since its recentTS was updated then
@@ -2342,7 +2343,7 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo
 					BindToDevice: bindToDevice,
 					Dest:         addr,
 				}
-				if _, err := e.stack.ReservePort(e.stack.Rand(), portRes, nil /* testPort */); err != nil {
+				if _, err := e.stack.ReservePort(e.stack.SecureRNG(), portRes, nil /* testPort */); err != nil {
 					return false, nil
 				}
 			}
@@ -2384,10 +2385,10 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo
 
 // connect connects the endpoint to its peer.
 // +checklocks:e.mu
-func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool) tcpip.Error {
+func (e *Endpoint) connect(addr tcpip.FullAddress, handshake bool) tcpip.Error {
 	connectingAddr := addr.Addr
 
-	addr, netProto, err := e.checkV4MappedLocked(addr)
+	addr, netProto, err := e.checkV4MappedLocked(addr, false /* bind */)
 	if err != nil {
 		return err
 	}
@@ -2497,13 +2498,13 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool) tcpip.Error {
 }
 
 // ConnectEndpoint is not supported.
-func (*endpoint) ConnectEndpoint(tcpip.Endpoint) tcpip.Error {
+func (*Endpoint) ConnectEndpoint(tcpip.Endpoint) tcpip.Error {
 	return &tcpip.ErrInvalidEndpointState{}
 }
 
 // Shutdown closes the read and/or write end of the endpoint connection to its
 // peer.
-func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) tcpip.Error {
+func (e *Endpoint) Shutdown(flags tcpip.ShutdownFlags) tcpip.Error {
 	e.LockUser()
 	defer e.UnlockUser()
 
@@ -2521,7 +2522,7 @@ func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) tcpip.Error {
 }
 
 // +checklocks:e.mu
-func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error {
+func (e *Endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error {
 	e.shutdownFlags |= flags
 	switch {
 	case e.EndpointState().connected():
@@ -2603,7 +2604,7 @@ func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error {
 
 // Listen puts the endpoint in "listen" mode, which allows it to accept
 // new connections.
-func (e *endpoint) Listen(backlog int) tcpip.Error {
+func (e *Endpoint) Listen(backlog int) tcpip.Error {
 	if err := e.listen(backlog); err != nil {
 		if !err.IgnoreStats() {
 			e.stack.Stats().TCP.FailedConnectionAttempts.Increment()
@@ -2614,7 +2615,7 @@ func (e *endpoint) Listen(backlog int) tcpip.Error {
 	return nil
 }
 
-func (e *endpoint) listen(backlog int) tcpip.Error {
+func (e *Endpoint) listen(backlog int) tcpip.Error {
 	e.LockUser()
 	defer e.UnlockUser()
 
@@ -2630,7 +2631,7 @@ func (e *endpoint) listen(backlog int) tcpip.Error {
 		e.acceptQueue.capacity = backlog
 
 		if e.acceptQueue.pendingEndpoints == nil {
-			e.acceptQueue.pendingEndpoints = make(map[*endpoint]struct{})
+			e.acceptQueue.pendingEndpoints = make(map[*Endpoint]struct{})
 		}
 
 		e.shutdownFlags = 0
@@ -2675,7 +2676,7 @@ func (e *endpoint) listen(backlog int) tcpip.Error {
 	// endpoints.
 	e.acceptMu.Lock()
 	if e.acceptQueue.pendingEndpoints == nil {
-		e.acceptQueue.pendingEndpoints = make(map[*endpoint]struct{})
+		e.acceptQueue.pendingEndpoints = make(map[*Endpoint]struct{})
 	}
 	if e.acceptQueue.capacity == 0 {
 		e.acceptQueue.capacity = backlog
@@ -2693,7 +2694,7 @@ func (e *endpoint) listen(backlog int) tcpip.Error {
 // to an endpoint previously set to listen mode.
 //
 // addr if not-nil will contain the peer address of the returned endpoint.
-func (e *endpoint) Accept(peerAddr *tcpip.FullAddress) (tcpip.Endpoint, *waiter.Queue, tcpip.Error) {
+func (e *Endpoint) Accept(peerAddr *tcpip.FullAddress) (tcpip.Endpoint, *waiter.Queue, tcpip.Error) {
 	e.LockUser()
 	defer e.UnlockUser()
 
@@ -2706,10 +2707,10 @@ func (e *endpoint) Accept(peerAddr *tcpip.FullAddress) (tcpip.Endpoint, *waiter.
 	}
 
 	// Get the new accepted endpoint.
-	var n *endpoint
+	var n *Endpoint
 	e.acceptMu.Lock()
 	if element := e.acceptQueue.endpoints.Front(); element != nil {
-		n = e.acceptQueue.endpoints.Remove(element).(*endpoint)
+		n = e.acceptQueue.endpoints.Remove(element).(*Endpoint)
 	}
 	e.acceptMu.Unlock()
 	if n == nil {
@@ -2722,7 +2723,7 @@ func (e *endpoint) Accept(peerAddr *tcpip.FullAddress) (tcpip.Endpoint, *waiter.
 }
 
 // Bind binds the endpoint to a specific local port and optionally address.
-func (e *endpoint) Bind(addr tcpip.FullAddress) (err tcpip.Error) {
+func (e *Endpoint) Bind(addr tcpip.FullAddress) (err tcpip.Error) {
 	e.LockUser()
 	defer e.UnlockUser()
 
@@ -2730,7 +2731,7 @@ func (e *endpoint) Bind(addr tcpip.FullAddress) (err tcpip.Error) {
 }
 
 // +checklocks:e.mu
-func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) {
+func (e *Endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) {
 	// Don't allow binding once endpoint is not in the initial state
 	// anymore. This is because once the endpoint goes into a connected or
 	// listen state, it is already bound.
@@ -2739,7 +2740,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) {
 	}
 
 	e.BindAddr = addr.Addr
-	addr, netProto, err := e.checkV4MappedLocked(addr)
+	addr, netProto, err := e.checkV4MappedLocked(addr, true /* bind */)
 	if err != nil {
 		return err
 	}
@@ -2778,7 +2779,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) {
 		BindToDevice: bindToDevice,
 		Dest:         tcpip.FullAddress{},
 	}
-	port, err := e.stack.ReservePort(e.stack.Rand(), portRes, func(p uint16) (bool, tcpip.Error) {
+	port, err := e.stack.ReservePort(e.stack.SecureRNG(), portRes, func(p uint16) (bool, tcpip.Error) {
 		id := e.TransportEndpointInfo.ID
 		id.LocalPort = p
 		// CheckRegisterTransportEndpoint should only return an error if there is a
@@ -2814,7 +2815,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) {
 }
 
 // GetLocalAddress returns the address to which the endpoint is bound.
-func (e *endpoint) GetLocalAddress() (tcpip.FullAddress, tcpip.Error) {
+func (e *Endpoint) GetLocalAddress() (tcpip.FullAddress, tcpip.Error) {
 	e.LockUser()
 	defer e.UnlockUser()
 
@@ -2826,7 +2827,7 @@ func (e *endpoint) GetLocalAddress() (tcpip.FullAddress, tcpip.Error) {
 }
 
 // GetRemoteAddress returns the address to which the endpoint is connected.
-func (e *endpoint) GetRemoteAddress() (tcpip.FullAddress, tcpip.Error) {
+func (e *Endpoint) GetRemoteAddress() (tcpip.FullAddress, tcpip.Error) {
 	e.LockUser()
 	defer e.UnlockUser()
 
@@ -2837,7 +2838,7 @@ func (e *endpoint) GetRemoteAddress() (tcpip.FullAddress, tcpip.Error) {
 	return e.getRemoteAddress(), nil
 }
 
-func (e *endpoint) getRemoteAddress() tcpip.FullAddress {
+func (e *Endpoint) getRemoteAddress() tcpip.FullAddress {
 	return tcpip.FullAddress{
 		Addr: e.TransportEndpointInfo.ID.RemoteAddress,
 		Port: e.TransportEndpointInfo.ID.RemotePort,
@@ -2845,14 +2846,15 @@ func (e *endpoint) getRemoteAddress() tcpip.FullAddress {
 	}
 }
 
-func (*endpoint) HandlePacket(stack.TransportEndpointID, stack.PacketBufferPtr) {
+// HandlePacket implements stack.TransportEndpoint.HandlePacket.
+func (*Endpoint) HandlePacket(stack.TransportEndpointID, *stack.PacketBuffer) {
 	// TCP HandlePacket is not required anymore as inbound packets first
 	// land at the Dispatcher which then can either deliver using the
 	// worker go routine or directly do the invoke the tcp processing inline
 	// based on the state of the endpoint.
 }
 
-func (e *endpoint) enqueueSegment(s *segment) bool {
+func (e *Endpoint) enqueueSegment(s *segment) bool {
 	// Send packet to worker goroutine.
 	if !e.segmentQueue.enqueue(s) {
 		// The queue is full, so we drop the segment.
@@ -2863,7 +2865,7 @@ func (e *endpoint) enqueueSegment(s *segment) bool {
 	return true
 }
 
-func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, pkt stack.PacketBufferPtr) {
+func (e *Endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, pkt *stack.PacketBuffer) {
 	// Update last error first.
 	e.lastErrorMu.Lock()
 	e.lastError = err
@@ -2920,7 +2922,7 @@ func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, p
 }
 
 // HandleError implements stack.TransportEndpoint.
-func (e *endpoint) HandleError(transErr stack.TransportError, pkt stack.PacketBufferPtr) {
+func (e *Endpoint) HandleError(transErr stack.TransportError, pkt *stack.PacketBuffer) {
 	handlePacketTooBig := func(mtu uint32) {
 		e.sndQueueInfo.sndQueueMu.Lock()
 		update := false
@@ -2962,7 +2964,7 @@ func (e *endpoint) HandleError(transErr stack.TransportError, pkt stack.PacketBu
 
 // updateSndBufferUsage is called by the protocol goroutine when room opens up
 // in the send buffer. The number of newly available bytes is v.
-func (e *endpoint) updateSndBufferUsage(v int) {
+func (e *Endpoint) updateSndBufferUsage(v int) {
 	sendBufferSize := e.getSendBufferSize()
 	e.sndQueueInfo.sndQueueMu.Lock()
 	notify := e.sndQueueInfo.SndBufUsed >= sendBufferSize>>1
@@ -2990,7 +2992,7 @@ func (e *endpoint) updateSndBufferUsage(v int) {
 // s will be nil).
 //
 // +checklocks:e.mu
-func (e *endpoint) readyToRead(s *segment) {
+func (e *Endpoint) readyToRead(s *segment) {
 	e.rcvQueueMu.Lock()
 	if s != nil {
 		e.RcvBufUsed += s.payloadSize()
@@ -3006,7 +3008,7 @@ func (e *endpoint) readyToRead(s *segment) {
 // receiveBufferAvailableLocked calculates how many bytes are still available
 // in the receive buffer.
 // +checklocks:e.rcvQueueMu
-func (e *endpoint) receiveBufferAvailableLocked(rcvBufSize int) int {
+func (e *Endpoint) receiveBufferAvailableLocked(rcvBufSize int) int {
 	// We may use more bytes than the buffer size when the receive buffer
 	// shrinks.
 	memUsed := e.receiveMemUsed()
@@ -3020,7 +3022,7 @@ func (e *endpoint) receiveBufferAvailableLocked(rcvBufSize int) int {
 // receiveBufferAvailable calculates how many bytes are still available in the
 // receive buffer based on the actual memory used by all segments held in
 // receive buffer/pending and segment queue.
-func (e *endpoint) receiveBufferAvailable() int {
+func (e *Endpoint) receiveBufferAvailable() int {
 	e.rcvQueueMu.Lock()
 	available := e.receiveBufferAvailableLocked(int(e.ops.GetReceiveBufferSize()))
 	e.rcvQueueMu.Unlock()
@@ -3028,7 +3030,7 @@ func (e *endpoint) receiveBufferAvailable() int {
 }
 
 // receiveBufferUsed returns the amount of in-use receive buffer.
-func (e *endpoint) receiveBufferUsed() int {
+func (e *Endpoint) receiveBufferUsed() int {
 	e.rcvQueueMu.Lock()
 	used := e.RcvBufUsed
 	e.rcvQueueMu.Unlock()
@@ -3037,18 +3039,18 @@ func (e *endpoint) receiveBufferUsed() int {
 
 // receiveMemUsed returns the total memory in use by segments held by this
 // endpoint.
-func (e *endpoint) receiveMemUsed() int {
+func (e *Endpoint) receiveMemUsed() int {
 	return int(e.rcvMemUsed.Load())
 }
 
 // updateReceiveMemUsed adds the provided delta to e.rcvMemUsed.
-func (e *endpoint) updateReceiveMemUsed(delta int) {
+func (e *Endpoint) updateReceiveMemUsed(delta int) {
 	e.rcvMemUsed.Add(int32(delta))
 }
 
 // maxReceiveBufferSize returns the stack wide maximum receive buffer size for
 // an endpoint.
-func (e *endpoint) maxReceiveBufferSize() int {
+func (e *Endpoint) maxReceiveBufferSize() int {
 	var rs tcpip.TCPReceiveBufferSizeRangeOption
 	if err := e.stack.TransportProtocolOption(ProtocolNumber, &rs); err != nil {
 		// As a fallback return the hardcoded max buffer size.
@@ -3058,12 +3060,12 @@ func (e *endpoint) maxReceiveBufferSize() int {
 }
 
 // connDirectionState returns the close state of the send and receive parts of the endpoint
-func (e *endpoint) connDirectionState() connDirectionState {
+func (e *Endpoint) connDirectionState() connDirectionState {
 	return connDirectionState(e.connectionDirectionState.Load())
 }
 
 // updateConnDirectionState updates the close state of the send and receive parts of the endpoint
-func (e *endpoint) updateConnDirectionState(state connDirectionState) connDirectionState {
+func (e *Endpoint) updateConnDirectionState(state connDirectionState) connDirectionState {
 	return connDirectionState(e.connectionDirectionState.Swap(uint32(e.connDirectionState() | state)))
 }
 
@@ -3072,7 +3074,7 @@ func (e *endpoint) updateConnDirectionState(state connDirectionState) connDirect
 // disabled then the window scaling factor is based on the size of the
 // receiveBuffer otherwise we use the max permissible receive buffer size to
 // compute the scale.
-func (e *endpoint) rcvWndScaleForHandshake() int {
+func (e *Endpoint) rcvWndScaleForHandshake() int {
 	bufSizeForScale := e.ops.GetReceiveBufferSize()
 
 	e.rcvQueueMu.Lock()
@@ -3087,7 +3089,7 @@ func (e *endpoint) rcvWndScaleForHandshake() int {
 
 // updateRecentTimestamp updates the recent timestamp using the algorithm
 // described in https://tools.ietf.org/html/rfc7323#section-4.3
-func (e *endpoint) updateRecentTimestamp(tsVal uint32, maxSentAck seqnum.Value, segSeq seqnum.Value) {
+func (e *Endpoint) updateRecentTimestamp(tsVal uint32, maxSentAck seqnum.Value, segSeq seqnum.Value) {
 	if e.SendTSOk && seqnum.Value(e.recentTimestamp()).LessThan(seqnum.Value(tsVal)) && segSeq.LessThanEq(maxSentAck) {
 		e.setRecentTimestamp(tsVal)
 	}
@@ -3096,29 +3098,29 @@ func (e *endpoint) updateRecentTimestamp(tsVal uint32, maxSentAck seqnum.Value,
 // maybeEnableTimestamp marks the timestamp option enabled for this endpoint if
 // the SYN options indicate that timestamp option was negotiated. It also
 // initializes the recentTS with the value provided in synOpts.TSval.
-func (e *endpoint) maybeEnableTimestamp(synOpts header.TCPSynOptions) {
+func (e *Endpoint) maybeEnableTimestamp(synOpts header.TCPSynOptions) {
 	if synOpts.TS {
 		e.SendTSOk = true
 		e.setRecentTimestamp(synOpts.TSVal)
 	}
 }
 
-func (e *endpoint) tsVal(now tcpip.MonotonicTime) uint32 {
+func (e *Endpoint) tsVal(now tcpip.MonotonicTime) uint32 {
 	return e.TSOffset.TSVal(now)
 }
 
-func (e *endpoint) tsValNow() uint32 {
+func (e *Endpoint) tsValNow() uint32 {
 	return e.tsVal(e.stack.Clock().NowMonotonic())
 }
 
-func (e *endpoint) elapsed(now tcpip.MonotonicTime, tsEcr uint32) time.Duration {
+func (e *Endpoint) elapsed(now tcpip.MonotonicTime, tsEcr uint32) time.Duration {
 	return e.TSOffset.Elapsed(now, tsEcr)
 }
 
 // maybeEnableSACKPermitted marks the SACKPermitted option enabled for this endpoint
 // if the SYN options indicate that the SACK option was negotiated and the TCP
 // stack is configured to enable TCP SACK option.
-func (e *endpoint) maybeEnableSACKPermitted(synOpts header.TCPSynOptions) {
+func (e *Endpoint) maybeEnableSACKPermitted(synOpts header.TCPSynOptions) {
 	var v tcpip.TCPSACKEnabled
 	if err := e.stack.TransportProtocolOption(ProtocolNumber, &v); err != nil {
 		// Stack doesn't support SACK. So just return.
@@ -3131,7 +3133,7 @@ func (e *endpoint) maybeEnableSACKPermitted(synOpts header.TCPSynOptions) {
 }
 
 // maxOptionSize returns the maximum size of TCP options.
-func (e *endpoint) maxOptionSize() (size int) {
+func (e *Endpoint) maxOptionSize() (size int) {
 	var maxSackBlocks [header.TCPMaxSACKBlocks]header.SACKBlock
 	options := e.makeOptions(maxSackBlocks[:])
 	size = len(options)
@@ -3144,7 +3146,7 @@ func (e *endpoint) maxOptionSize() (size int) {
 // used before invoking the probe.
 //
 // +checklocks:e.mu
-func (e *endpoint) completeStateLocked(s *stack.TCPEndpointState) {
+func (e *Endpoint) completeStateLocked(s *stack.TCPEndpointState) {
 	s.TCPEndpointStateInner = e.TCPEndpointStateInner
 	s.ID = stack.TCPEndpointID(e.TransportEndpointInfo.ID)
 	s.SegTime = e.stack.Clock().NowMonotonic()
@@ -3182,7 +3184,7 @@ func (e *endpoint) completeStateLocked(s *stack.TCPEndpointState) {
 	s.Sender.SpuriousRecovery = e.snd.spuriousRecovery
 }
 
-func (e *endpoint) initHostGSO() {
+func (e *Endpoint) initHostGSO() {
 	switch e.route.NetProto() {
 	case header.IPv4ProtocolNumber:
 		e.gso.Type = stack.GSOTCPv4
@@ -3198,10 +3200,10 @@ func (e *endpoint) initHostGSO() {
 	e.gso.MaxSize = e.route.GSOMaxSize()
 }
 
-func (e *endpoint) initGSO() {
+func (e *Endpoint) initGSO() {
 	if e.route.HasHostGSOCapability() {
 		e.initHostGSO()
-	} else if e.route.HasGvisorGSOCapability() {
+	} else if e.route.HasGVisorGSOCapability() {
 		e.gso = stack.GSO{
 			MaxSize:   e.route.GSOMaxSize(),
 			Type:      stack.GSOGvisor,
@@ -3212,12 +3214,12 @@ func (e *endpoint) initGSO() {
 
 // State implements tcpip.Endpoint.State. It exports the endpoint's protocol
 // state for diagnostics.
-func (e *endpoint) State() uint32 {
+func (e *Endpoint) State() uint32 {
 	return uint32(e.EndpointState())
 }
 
 // Info returns a copy of the endpoint info.
-func (e *endpoint) Info() tcpip.EndpointInfo {
+func (e *Endpoint) Info() tcpip.EndpointInfo {
 	e.LockUser()
 	// Make a copy of the endpoint info.
 	ret := e.TransportEndpointInfo
@@ -3226,12 +3228,12 @@ func (e *endpoint) Info() tcpip.EndpointInfo {
 }
 
 // Stats returns a pointer to the endpoint stats.
-func (e *endpoint) Stats() tcpip.EndpointStats {
+func (e *Endpoint) Stats() tcpip.EndpointStats {
 	return &e.stats
 }
 
 // Wait implements stack.TransportEndpoint.Wait.
-func (e *endpoint) Wait() {
+func (e *Endpoint) Wait() {
 	waitEntry, notifyCh := waiter.NewChannelEntry(waiter.EventHUp)
 	e.waiterQueue.EventRegister(&waitEntry)
 	defer e.waiterQueue.EventUnregister(&waitEntry)
@@ -3243,17 +3245,15 @@ func (e *endpoint) Wait() {
 }
 
 // SocketOptions implements tcpip.Endpoint.SocketOptions.
-func (e *endpoint) SocketOptions() *tcpip.SocketOptions {
+func (e *Endpoint) SocketOptions() *tcpip.SocketOptions {
 	return &e.ops
 }
 
 // GetTCPSendBufferLimits is used to get send buffer size limits for TCP.
-func GetTCPSendBufferLimits(s tcpip.StackHandler) tcpip.SendBufferSizeOption {
-	var ss tcpip.TCPSendBufferSizeRangeOption
-	if err := s.TransportProtocolOption(header.TCPProtocolNumber, &ss); err != nil {
-		panic(fmt.Sprintf("s.TransportProtocolOption(%d, %#v) = %s", header.TCPProtocolNumber, ss, err))
-	}
-
+func GetTCPSendBufferLimits(sh tcpip.StackHandler) tcpip.SendBufferSizeOption {
+	// This type assertion is safe because only the TCP stack calls this
+	// function.
+	ss := sh.(*stack.Stack).TCPSendBufferLimits()
 	return tcpip.SendBufferSizeOption{
 		Min:     ss.Min,
 		Default: ss.Default,
@@ -3262,7 +3262,7 @@ func GetTCPSendBufferLimits(s tcpip.StackHandler) tcpip.SendBufferSizeOption {
 }
 
 // allowOutOfWindowAck returns true if an out-of-window ACK can be sent now.
-func (e *endpoint) allowOutOfWindowAck() bool {
+func (e *Endpoint) allowOutOfWindowAck() bool {
 	now := e.stack.Clock().NowMonotonic()
 
 	if e.lastOutOfWindowAckTime != (tcpip.MonotonicTime{}) {
@@ -3295,7 +3295,7 @@ func GetTCPReceiveBufferLimits(s tcpip.StackHandler) tcpip.ReceiveBufferSizeOpti
 
 // computeTCPSendBufferSize implements auto tuning of send buffer size and
 // returns the new send buffer size.
-func (e *endpoint) computeTCPSendBufferSize() int64 {
+func (e *Endpoint) computeTCPSendBufferSize() int64 {
 	curSndBufSz := int64(e.getSendBufferSize())
 
 	// Auto tuning is disabled when the user explicitly sets the send
@@ -3325,3 +3325,8 @@ func (e *endpoint) computeTCPSendBufferSize() int64 {
 
 	return newSndBufSz
 }
+
+// GetAcceptConn implements tcpip.SocketOptionsHandler.
+func (e *Endpoint) GetAcceptConn() bool {
+	return EndpointState(e.State()) == StateListen
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go
index 8382b35b..63457f7f 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go
@@ -15,6 +15,7 @@
 package tcp
 
 import (
+	"context"
 	"fmt"
 
 	"gvisor.dev/gvisor/pkg/atomicbitops"
@@ -26,7 +27,7 @@ import (
 )
 
 // beforeSave is invoked by stateify.
-func (e *endpoint) beforeSave() {
+func (e *Endpoint) beforeSave() {
 	// Stop incoming packets.
 	e.segmentQueue.freeze()
 
@@ -56,26 +57,28 @@ func (e *endpoint) beforeSave() {
 	default:
 		panic(fmt.Sprintf("endpoint in unknown state %v", e.EndpointState()))
 	}
+
+	e.stack.RegisterResumableEndpoint(e)
 }
 
 // saveEndpoints is invoked by stateify.
-func (a *acceptQueue) saveEndpoints() []*endpoint {
-	acceptedEndpoints := make([]*endpoint, a.endpoints.Len())
+func (a *acceptQueue) saveEndpoints() []*Endpoint {
+	acceptedEndpoints := make([]*Endpoint, a.endpoints.Len())
 	for i, e := 0, a.endpoints.Front(); e != nil; i, e = i+1, e.Next() {
-		acceptedEndpoints[i] = e.Value.(*endpoint)
+		acceptedEndpoints[i] = e.Value.(*Endpoint)
 	}
 	return acceptedEndpoints
 }
 
 // loadEndpoints is invoked by stateify.
-func (a *acceptQueue) loadEndpoints(acceptedEndpoints []*endpoint) {
+func (a *acceptQueue) loadEndpoints(_ context.Context, acceptedEndpoints []*Endpoint) {
 	for _, ep := range acceptedEndpoints {
 		a.endpoints.PushBack(ep)
 	}
 }
 
 // saveState is invoked by stateify.
-func (e *endpoint) saveState() EndpointState {
+func (e *Endpoint) saveState() EndpointState {
 	return e.EndpointState()
 }
 
@@ -89,7 +92,7 @@ var connectingLoading sync.WaitGroup
 // Bound endpoint loading happens last.
 
 // loadState is invoked by stateify.
-func (e *endpoint) loadState(epState EndpointState) {
+func (e *Endpoint) loadState(_ context.Context, epState EndpointState) {
 	// This is to ensure that the loading wait groups include all applicable
 	// endpoints before any asynchronous calls to the Wait() methods.
 	// For restore purposes we treat TimeWait like a connected endpoint.
@@ -109,24 +112,25 @@ func (e *endpoint) loadState(epState EndpointState) {
 }
 
 // afterLoad is invoked by stateify.
-func (e *endpoint) afterLoad() {
+func (e *Endpoint) afterLoad(ctx context.Context) {
 	// RacyLoad() can be used because we are initializing e.
 	e.origEndpointState = e.state.RacyLoad()
 	// Restore the endpoint to InitialState as it will be moved to
-	// its origEndpointState during Resume.
+	// its origEndpointState during Restore.
 	e.state = atomicbitops.FromUint32(uint32(StateInitial))
-	stack.StackFromEnv.RegisterRestoredEndpoint(e)
+	stack.RestoreStackFromContext(ctx).RegisterRestoredEndpoint(e)
 }
 
-// Resume implements tcpip.ResumableEndpoint.Resume.
-func (e *endpoint) Resume(s *stack.Stack) {
+// Restore implements tcpip.RestoredEndpoint.Restore.
+func (e *Endpoint) Restore(s *stack.Stack) {
 	if !e.EndpointState().closed() {
-		e.keepalive.timer.init(s.Clock(), maybeFailTimerHandler(e, e.keepaliveTimerExpired))
+		e.keepalive.timer.init(s.Clock(), timerHandler(e, e.keepaliveTimerExpired))
 	}
 	if snd := e.snd; snd != nil {
-		snd.resendTimer.init(s.Clock(), maybeFailTimerHandler(e, e.snd.retransmitTimerExpired))
+		snd.resendTimer.init(s.Clock(), timerHandler(e, e.snd.retransmitTimerExpired))
 		snd.reorderTimer.init(s.Clock(), timerHandler(e, e.snd.rc.reorderTimerExpired))
 		snd.probeTimer.init(s.Clock(), timerHandler(e, e.snd.probeTimerExpired))
+		snd.corkTimer.init(s.Clock(), timerHandler(e, e.snd.corkTimerExpired))
 	}
 	e.stack = s
 	e.protocol = protocolFromStack(s)
@@ -136,7 +140,7 @@ func (e *endpoint) Resume(s *stack.Stack) {
 	bind := func() {
 		e.mu.Lock()
 		defer e.mu.Unlock()
-		addr, _, err := e.checkV4MappedLocked(tcpip.FullAddress{Addr: e.BindAddr, Port: e.TransportEndpointInfo.ID.LocalPort})
+		addr, _, err := e.checkV4MappedLocked(tcpip.FullAddress{Addr: e.BindAddr, Port: e.TransportEndpointInfo.ID.LocalPort}, true /* bind */)
 		if err != nil {
 			panic("unable to parse BindAddr: " + err.String())
 		}
@@ -193,6 +197,11 @@ func (e *endpoint) Resume(s *stack.Stack) {
 			e.timeWaitTimer = e.stack.Clock().AfterFunc(e.getTimeWaitDuration(), e.timeWaitTimerExpired)
 		}
 
+		if e.ops.GetCorkOption() {
+			// Rearm the timer if TCP_CORK is enabled which will
+			// drain all the segments in the queue after restore.
+			e.snd.corkTimer.enable(MinRTO)
+		}
 		e.mu.Unlock()
 		connectedLoading.Done()
 	case epState == StateListen:
@@ -243,7 +252,7 @@ func (e *endpoint) Resume(s *stack.Stack) {
 			panic(fmt.Sprintf("FindRoute failed when restoring endpoint w/ ID: %+v", e.ID))
 		}
 		e.route = r
-		timer, err := newBackoffTimer(e.stack.Clock(), InitialRTO, MaxRTO, maybeFailTimerHandler(e, e.h.retransmitHandlerLocked))
+		timer, err := newBackoffTimer(e.stack.Clock(), InitialRTO, MaxRTO, timerHandler(e, e.h.retransmitHandlerLocked))
 		if err != nil {
 			panic(fmt.Sprintf("newBackOffTimer(_, %s, %s, _) failed: %s", InitialRTO, MaxRTO, err))
 		}
@@ -269,3 +278,8 @@ func (e *endpoint) Resume(s *stack.Stack) {
 		tcpip.DeleteDanglingEndpoint(e)
 	}
 }
+
+// Resume implements tcpip.ResumableEndpoint.Resume.
+func (e *Endpoint) Resume() {
+	e.segmentQueue.thaw()
+}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go
index 0071093f..39a52215 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go
@@ -64,7 +64,7 @@ func NewForwarder(s *stack.Stack, rcvWnd, maxInFlight int, handler func(*Forward
 //
 // This function is expected to be passed as an argument to the
 // stack.SetTransportProtocolHandler function.
-func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) bool {
+func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
 	s, err := newIncomingSegment(id, f.stack.Clock(), pkt)
 	if err != nil {
 		return false
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go
index 81059d6a..e3f760b0 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go
@@ -16,13 +16,15 @@
 package tcp
 
 import (
+	"crypto/sha256"
+	"encoding/binary"
+	"fmt"
 	"runtime"
 	"strings"
 	"time"
 
 	"gvisor.dev/gvisor/pkg/sync"
 	"gvisor.dev/gvisor/pkg/tcpip"
-	"gvisor.dev/gvisor/pkg/tcpip/hash/jenkins"
 	"gvisor.dev/gvisor/pkg/tcpip/header"
 	"gvisor.dev/gvisor/pkg/tcpip/header/parse"
 	"gvisor.dev/gvisor/pkg/tcpip/internal/tcp"
@@ -84,10 +86,11 @@ const (
 	ccCubic = "cubic"
 )
 
+// +stateify savable
 type protocol struct {
 	stack *stack.Stack
 
-	mu                         sync.RWMutex
+	mu                         sync.RWMutex `state:"nosave"`
 	sackEnabled                bool
 	recovery                   tcpip.TCPRecovery
 	delayEnabled               bool
@@ -107,9 +110,8 @@ type protocol struct {
 	dispatcher                 dispatcher
 
 	// The following secrets are initialized once and stay unchanged after.
-	seqnumSecret     uint32
-	portOffsetSecret uint32
-	tsOffsetSecret   uint32
+	seqnumSecret   [16]byte
+	tsOffsetSecret [16]byte
 }
 
 // Number returns the tcp protocol number.
@@ -144,7 +146,7 @@ func (*protocol) ParsePorts(v []byte) (src, dst uint16, err tcpip.Error) {
 // to a specific processing queue. Each queue is serviced by its own processor
 // goroutine which is responsible for dequeuing and doing full TCP dispatch of
 // the packet.
-func (p *protocol) QueuePacket(ep stack.TransportEndpoint, id stack.TransportEndpointID, pkt stack.PacketBufferPtr) {
+func (p *protocol) QueuePacket(ep stack.TransportEndpoint, id stack.TransportEndpointID, pkt *stack.PacketBuffer) {
 	p.dispatcher.queuePacket(ep, id, p.stack.Clock(), pkt)
 }
 
@@ -155,7 +157,7 @@ func (p *protocol) QueuePacket(ep stack.TransportEndpoint, id stack.TransportEnd
 // a reset is sent in response to any incoming segment except another reset. In
 // particular, SYNs addressed to a non-existent connection are rejected by this
 // means."
-func (p *protocol) HandleUnknownDestinationPacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) stack.UnknownDestinationPacketDisposition {
+func (p *protocol) HandleUnknownDestinationPacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) stack.UnknownDestinationPacketDisposition {
 	s, err := newIncomingSegment(id, p.stack.Clock(), pkt)
 	if err != nil {
 		return stack.UnknownDestinationPacketMalformed
@@ -178,16 +180,15 @@ func (p *protocol) tsOffset(src, dst tcpip.Address) tcp.TSOffset {
 	//
 	// See https://tools.ietf.org/html/rfc7323#section-5.4 for details on
 	// why this is required.
-	//
-	// TODO(https://gvisor.dev/issues/6473): This is not really secure as
-	// it does not use the recommended algorithm linked above.
-	h := jenkins.Sum32(p.tsOffsetSecret)
+	h := sha256.New()
+
 	// Per hash.Hash.Writer:
 	//
 	// It never returns an error.
+	_, _ = h.Write(p.tsOffsetSecret[:])
 	_, _ = h.Write(src.AsSlice())
 	_, _ = h.Write(dst.AsSlice())
-	return tcp.NewTSOffset(h.Sum32())
+	return tcp.NewTSOffset(binary.LittleEndian.Uint32(h.Sum(nil)[:4]))
 }
 
 // replyWithReset replies to the given segment with a reset segment.
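
For context, the keyed-hash scheme the new tsOffset follows can be sketched standalone: SHA-256 over a 16-byte secret plus the connection's source and destination addresses, with the first four bytes of the digest becoming the per-connection offset. The helper below is only an illustration of that idea (tsOffsetFor is a made-up name, not gVisor's API); the real implementation is the hunk above.

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// tsOffsetFor mirrors the scheme above: SHA-256 over (secret || src || dst),
// taking the first four bytes of the digest as the timestamp offset.
func tsOffsetFor(secret [16]byte, src, dst []byte) uint32 {
	h := sha256.New()
	_, _ = h.Write(secret[:]) // hash.Hash.Write never returns an error
	_, _ = h.Write(src)
	_, _ = h.Write(dst)
	return binary.LittleEndian.Uint32(h.Sum(nil)[:4])
}

func main() {
	var secret [16]byte
	if _, err := rand.Read(secret[:]); err != nil {
		panic(err)
	}
	fmt.Printf("offset: %#x\n", tsOffsetFor(secret, []byte{192, 0, 2, 1}, []byte{198, 51, 100, 7}))
}
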
@@ -480,6 +481,13 @@ func (p *protocol) Option(option tcpip.GettableTransportProtocolOption) tcpip.Er
 	}
 }
 
+// SendBufferSize implements stack.SendBufSizeProto.
+func (p *protocol) SendBufferSize() tcpip.TCPSendBufferSizeRangeOption {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return p.sendBufferSize
+}
+
 // Close implements stack.TransportProtocol.Close.
 func (p *protocol) Close() {
 	p.dispatcher.close()
@@ -501,12 +509,33 @@ func (p *protocol) Resume() {
 }
 
 // Parse implements stack.TransportProtocol.Parse.
-func (*protocol) Parse(pkt stack.PacketBufferPtr) bool {
+func (*protocol) Parse(pkt *stack.PacketBuffer) bool {
 	return parse.TCP(pkt)
 }
 
-// NewProtocol returns a TCP transport protocol.
+// NewProtocol returns a TCP transport protocol with Reno congestion control.
 func NewProtocol(s *stack.Stack) stack.TransportProtocol {
+	return newProtocol(s, ccReno)
+}
+
+// NewProtocolCUBIC returns a TCP transport protocol with CUBIC congestion
+// control.
+//
+// TODO(b/345835636): Remove this and make CUBIC the default across the board.
+func NewProtocolCUBIC(s *stack.Stack) stack.TransportProtocol {
+	return newProtocol(s, ccCubic)
+}
+
+func newProtocol(s *stack.Stack, cc string) stack.TransportProtocol {
+	rng := s.SecureRNG()
+	var seqnumSecret [16]byte
+	var tsOffsetSecret [16]byte
+	if n, err := rng.Reader.Read(seqnumSecret[:]); err != nil || n != len(seqnumSecret) {
+		panic(fmt.Sprintf("Read() failed: %v", err))
+	}
+	if n, err := rng.Reader.Read(tsOffsetSecret[:]); err != nil || n != len(tsOffsetSecret) {
+		panic(fmt.Sprintf("Read() failed: %v", err))
+	}
 	p := protocol{
 		stack: s,
 		sendBufferSize: tcpip.TCPSendBufferSizeRangeOption{
@@ -519,7 +548,8 @@ func NewProtocol(s *stack.Stack) stack.TransportProtocol {
 			Default: DefaultReceiveBufferSize,
 			Max:     MaxBufferSize,
 		},
-		congestionControl:          ccReno,
+		sackEnabled:                true,
+		congestionControl:          cc,
 		availableCongestionControl: []string{ccReno, ccCubic},
 		moderateReceiveBuffer:      true,
 		lingerTimeout:              DefaultTCPLingerTimeout,
@@ -530,11 +560,10 @@ func NewProtocol(s *stack.Stack) stack.TransportProtocol {
 		maxRTO:                     MaxRTO,
 		maxRetries:                 MaxRetries,
 		recovery:                   tcpip.TCPRACKLossDetection,
-		seqnumSecret:               s.Rand().Uint32(),
-		portOffsetSecret:           s.Rand().Uint32(),
-		tsOffsetSecret:             s.Rand().Uint32(),
+		seqnumSecret:               seqnumSecret,
+		tsOffsetSecret:             tsOffsetSecret,
 	}
-	p.dispatcher.init(s.Rand(), runtime.GOMAXPROCS(0))
+	p.dispatcher.init(s.InsecureRNG(), runtime.GOMAXPROCS(0))
 	return &p
 }
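
For callers, the practical effect of the new constructor is that CUBIC can be selected at stack construction time, while tcp.NewProtocol keeps Reno (now with SACK enabled by default). A minimal sketch, assuming the usual stack.Options wiring used by netstack consumers:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
)

func main() {
	s := stack.New(stack.Options{
		NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
		// tcp.NewProtocol still selects Reno; the new factory opts into CUBIC.
		TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocolCUBIC},
	})
	defer s.Close()

	var cc tcpip.CongestionControlOption
	if err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil {
		fmt.Println("option error:", err)
		return
	}
	fmt.Println("congestion control:", cc) // expected: cubic
}
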
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rack.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rack.go
index 7dccc956..66ea6e5b 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rack.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rack.go
@@ -188,9 +188,9 @@ func (s *sender) schedulePTO() {
 // https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.2.
 //
 // +checklocks:s.ep.mu
-func (s *sender) probeTimerExpired() {
-	if s.probeTimer.isZero() || !s.probeTimer.checkExpiration() {
-		return
+func (s *sender) probeTimerExpired() tcpip.Error {
+	if s.probeTimer.isUninitialized() || !s.probeTimer.checkExpiration() {
+		return nil
 	}
 
 	var dataSent bool
@@ -231,7 +231,7 @@ func (s *sender) probeTimerExpired() {
 	// not the probe timer. This ensures that the sender does not send repeated,
 	// back-to-back tail loss probes.
 	s.postXmit(dataSent, false /* shouldScheduleProbe */)
-	return
+	return nil
 }
 
 // detectTLPRecovery detects if recovery was accomplished by the loss probes
@@ -388,14 +388,14 @@ func (rc *rackControl) detectLoss(rcvTime tcpip.MonotonicTime) int {
 // before the reorder timer expired.
 //
 // +checklocks:rc.snd.ep.mu
-func (rc *rackControl) reorderTimerExpired() {
-	if rc.snd.reorderTimer.isZero() || !rc.snd.reorderTimer.checkExpiration() {
-		return
+func (rc *rackControl) reorderTimerExpired() tcpip.Error {
+	if rc.snd.reorderTimer.isUninitialized() || !rc.snd.reorderTimer.checkExpiration() {
+		return nil
 	}
 
 	numLost := rc.detectLoss(rc.snd.ep.stack.Clock().NowMonotonic())
 	if numLost == 0 {
-		return
+		return nil
 	}
 
 	fastRetransmit := false
@@ -406,7 +406,7 @@ func (rc *rackControl) reorderTimerExpired() {
 	}
 
 	rc.DoRecovery(nil, fastRetransmit)
-	return
+	return nil
 }
 
 // DoRecovery implements lossRecovery.DoRecovery.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go
index 292e0d0e..349f950f 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go
@@ -30,7 +30,7 @@ import (
 // +stateify savable
 type receiver struct {
 	stack.TCPReceiverState
-	ep *endpoint
+	ep *Endpoint
 
 	// rcvWnd is the non-scaled receive window last advertised to the peer.
 	rcvWnd seqnum.Size
@@ -52,7 +52,7 @@ type receiver struct {
 	lastRcvdAckTime tcpip.MonotonicTime
 }
 
-func newReceiver(ep *endpoint, irs seqnum.Value, rcvWnd seqnum.Size, rcvWndScale uint8) *receiver {
+func newReceiver(ep *Endpoint, irs seqnum.Value, rcvWnd seqnum.Size, rcvWndScale uint8) *receiver {
 	return &receiver{
 		ep: ep,
 		TCPReceiverState: stack.TCPReceiverState{
@@ -554,7 +554,7 @@ func (r *receiver) handleTimeWaitSegment(s *segment) (resetTimeWait bool, newSyn
 	segLen := seqnum.Size(s.payloadSize())
 
 	// Just silently drop any RST packets in TIME_WAIT. We do not support
-	// TIME_WAIT assasination as a result we confirm w/ fix 1 as described
+	// TIME_WAIT assassination as a result we confirm w/ fix 1 as described
 	// in https://tools.ietf.org/html/rfc1337#section-3.
 	//
 	// This behavior overrides RFC793 page 70 where we transition to CLOSED
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/reno.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/reno.go
index 063552c7..2d1b011d 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/reno.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/reno.go
@@ -14,6 +14,10 @@
 
 package tcp
 
+import (
+	"time"
+)
+
 // renoState stores the variables related to TCP New Reno congestion
 // control algorithm.
 //
@@ -69,7 +73,7 @@ func (r *renoState) reduceSlowStartThreshold() {
 // Update updates the congestion state based on the number of packets that
 // were acknowledged.
 // Update implements congestionControl.Update.
-func (r *renoState) Update(packetsAcked int) {
+func (r *renoState) Update(packetsAcked int, _ time.Duration) {
 	if r.s.SndCwnd < r.s.Ssthresh {
 		packetsAcked = r.updateSlowStart(packetsAcked)
 		if packetsAcked == 0 {
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go
index df520658..6de583da 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go
@@ -55,11 +55,11 @@ type segment struct {
 	segmentEntry
 	segmentRefs
 
-	ep     *endpoint
+	ep     *Endpoint
 	qFlags queueFlags
 	id     stack.TransportEndpointID `state:"manual"`
 
-	pkt stack.PacketBufferPtr
+	pkt *stack.PacketBuffer
 
 	sequenceNumber seqnum.Value
 	ackNumber      seqnum.Value
@@ -92,15 +92,29 @@ type segment struct {
 	lost bool
 }
 
-func newIncomingSegment(id stack.TransportEndpointID, clock tcpip.Clock, pkt stack.PacketBufferPtr) (*segment, error) {
+func newIncomingSegment(id stack.TransportEndpointID, clock tcpip.Clock, pkt *stack.PacketBuffer) (*segment, error) {
 	hdr := header.TCP(pkt.TransportHeader().Slice())
-	netHdr := pkt.Network()
+	var srcAddr tcpip.Address
+	var dstAddr tcpip.Address
+	switch netProto := pkt.NetworkProtocolNumber; netProto {
+	case header.IPv4ProtocolNumber:
+		hdr := header.IPv4(pkt.NetworkHeader().Slice())
+		srcAddr = hdr.SourceAddress()
+		dstAddr = hdr.DestinationAddress()
+	case header.IPv6ProtocolNumber:
+		hdr := header.IPv6(pkt.NetworkHeader().Slice())
+		srcAddr = hdr.SourceAddress()
+		dstAddr = hdr.DestinationAddress()
+	default:
+		panic(fmt.Sprintf("unknown network protocol number %d", netProto))
+	}
+
 	csum, csumValid, ok := header.TCPValid(
 		hdr,
 		func() uint16 { return pkt.Data().Checksum() },
 		uint16(pkt.Data().Size()),
-		netHdr.SourceAddress(),
-		netHdr.DestinationAddress(),
+		srcAddr,
+		dstAddr,
 		pkt.RXChecksumValidated)
 	if !ok {
 		return nil, fmt.Errorf("header data offset does not respect size constraints: %d < offset < %d, got offset=%d", header.TCPMinimumSize, len(hdr), hdr.DataOffset())

@@ -168,7 +182,7 @@ func (s *segment) merge(oth *segment) {
 // setOwner sets the owning endpoint for this segment. It's required
 // to be called to ensure memory accounting for receive/send buffer
 // queues is done properly.
-func (s *segment) setOwner(ep *endpoint, qFlags queueFlags) {
+func (s *segment) setOwner(ep *Endpoint, qFlags queueFlags) {
 	switch qFlags {
 	case recvQ:
 		ep.updateReceiveMemUsed(s.segMemSize())
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go
index 53839387..6f003efc 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go
@@ -24,7 +24,7 @@ import (
 type segmentQueue struct {
 	mu     sync.Mutex  `state:"nosave"`
 	list   segmentList `state:"wait"`
-	ep     *endpoint
+	ep     *Endpoint
 	frozen bool
 }
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_state.go
index 57bbd69f..76ab5629 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_state.go
@@ -14,6 +14,10 @@
 
 package tcp
 
+import (
+	"context"
+)
+
 // saveOptions is invoked by stateify.
 func (s *segment) saveOptions() []byte {
 	// We cannot save s.options directly as it may point to s.data's trimmed
@@ -23,7 +27,7 @@ func (s *segment) saveOptions() []byte {
 }
 
 // loadOptions is invoked by stateify.
-func (s *segment) loadOptions(options []byte) {
+func (s *segment) loadOptions(_ context.Context, options []byte) {
 	// NOTE: We cannot point s.options back into s.data's trimmed tail. But
 	// it is OK as they do not need to be aliased. Plus, options is already
 	// allocated so there is no cost here.
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go
index a90f46e3..1e20ac2c 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go
@@ -20,6 +20,7 @@ import (
 	"sort"
 	"time"
 
+	"gvisor.dev/gvisor/pkg/buffer"
 	"gvisor.dev/gvisor/pkg/sync"
 	"gvisor.dev/gvisor/pkg/tcpip"
 	"gvisor.dev/gvisor/pkg/tcpip/header"
@@ -48,6 +49,16 @@ const (
 	// before timing out the connection.
 	// Linux default TCP_RETR2, net.ipv4.tcp_retries2.
 	MaxRetries = 15
+
+	// InitialSsthresh is the maximum int value, which depends on the
+	// platform.
+	InitialSsthresh = math.MaxInt
+
+	// unknownRTT is used to indicate to congestion control algorithms that we
+	// were unable to measure the round-trip time when processing ACKs.
+	// Algorithms (such as HyStart) that use the round-trip time should ignore
+	// such Updates.
+	unknownRTT = time.Duration(-1)
 )
 
 // congestionControl is an interface that must be implemented by any supported
@@ -63,8 +74,9 @@ type congestionControl interface {
 
 	// Update is invoked when processing inbound acks. It's passed the
 	// number of packet's that were acked by the most recent cumulative
-	// acknowledgement.
-	Update(packetsAcked int)
+	// acknowledgement.  rtt is the round-trip time, or is set to unknownRTT
+	// (above) to indicate the time is unknown.
+	Update(packetsAcked int, rtt time.Duration)
 
 	// PostRecovery is invoked when the sender is exiting a fast retransmit/
 	// recovery phase. This provides congestion control algorithms a way
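
To illustrate the widened interface, a toy implementation of the new Update signature might look like the following. toyCC and its field are hypothetical; only the unknownRTT convention comes from the change above.

package main

import (
	"fmt"
	"time"
)

// unknownRTT mirrors the sentinel introduced above: a negative duration that
// signals "no RTT sample was available for this ACK".
const unknownRTT = time.Duration(-1)

// toyCC is a hypothetical congestion controller; it only demonstrates how an
// implementation of Update(packetsAcked, rtt) can skip RTT-driven heuristics
// when handed unknownRTT.
type toyCC struct{ cwnd int }

func (c *toyCC) Update(packetsAcked int, rtt time.Duration) {
	c.cwnd += packetsAcked // growth that does not depend on RTT
	if rtt == unknownRTT {
		return // HyStart-style logic needs a real sample; ignore this Update
	}
	// ... RTT-based adjustments would go here ...
}

func main() {
	c := &toyCC{cwnd: 10}
	c.Update(3, unknownRTT)
	c.Update(2, 25*time.Millisecond)
	fmt.Println("cwnd:", c.cwnd) // cwnd: 15
}
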
@@ -88,7 +100,7 @@ type lossRecovery interface {
 // +stateify savable
 type sender struct {
 	stack.TCPSenderState
-	ep *endpoint
+	ep *Endpoint
 
 	// lr is the loss recovery algorithm used by the sender.
 	lr lossRecovery
@@ -105,8 +117,16 @@ type sender struct {
 	// window probes.
 	unackZeroWindowProbes uint32 `state:"nosave"`
 
-	writeNext   *segment
-	writeList   segmentList
+	// writeNext is the next segment to write that hasn't already been
+	// written, i.e. the first payload starting at SND.NXT.
+	writeNext *segment
+
+	// writeList holds all writable data: both unsent data and
+	// sent-but-unacknowledged data. Alternatively: it holds all bytes
+	// starting from SND.UNA.
+	writeList segmentList
+
+	// resendTimer is used for RTOs.
 	resendTimer timer `state:"nosave"`
 
 	// rtt.TCPRTTState.SRTT and rtt.TCPRTTState.RTTVar are the "smoothed
@@ -151,6 +171,13 @@ type sender struct {
 	// segment after entering an RTO for the first time as described in
 	// RFC3522 Section 3.2.
 	retransmitTS uint32
+
+	// startCork is set when the sender starts corking (holding back) segments.
+	startCork bool
+
+	// corkTimer is used to drain the segments which are held when TCP_CORK
+	// option is enabled.
+	corkTimer timer `state:"nosave"`
 }
 
 // rtt is a synchronization wrapper used to appease stateify. See the comment
@@ -164,7 +191,7 @@ type rtt struct {
 }
 
 // +checklocks:ep.mu
-func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint16, sndWndScale int) *sender {
+func newSender(ep *Endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint16, sndWndScale int) *sender {
 	// The sender MUST reduce the TCP data length to account for any IP or
 	// TCP options that it is including in the packets that it sends.
 	// See: https://tools.ietf.org/html/rfc6691#section-2
@@ -205,9 +232,10 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint
 		s.SndWndScale = uint8(sndWndScale)
 	}
 
-	s.resendTimer.init(s.ep.stack.Clock(), maybeFailTimerHandler(s.ep, s.retransmitTimerExpired))
+	s.resendTimer.init(s.ep.stack.Clock(), timerHandler(s.ep, s.retransmitTimerExpired))
 	s.reorderTimer.init(s.ep.stack.Clock(), timerHandler(s.ep, s.rc.reorderTimerExpired))
 	s.probeTimer.init(s.ep.stack.Clock(), timerHandler(s.ep, s.probeTimerExpired))
+	s.corkTimer.init(s.ep.stack.Clock(), timerHandler(s.ep, s.corkTimerExpired))
 
 	s.ep.AssertLockHeld(ep)
 	s.updateMaxPayloadSize(int(ep.route.MTU()), 0)
@@ -243,9 +271,7 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint
 // their initial values.
 func (s *sender) initCongestionControl(congestionControlName tcpip.CongestionControlOption) congestionControl {
 	s.SndCwnd = InitialCwnd
-	// Set sndSsthresh to the maximum int value, which depends on the
-	// platform.
-	s.Ssthresh = int(^uint(0) >> 1)
+	s.Ssthresh = InitialSsthresh
 
 	switch congestionControlName {
 	case ccCubic:
@@ -437,7 +463,7 @@ func (s *sender) resendSegment() {
 func (s *sender) retransmitTimerExpired() tcpip.Error {
 	// Check if the timer actually expired or if it's a spurious wake due
 	// to a previously orphaned runtime timer.
-	if s.resendTimer.isZero() || !s.resendTimer.checkExpiration() {
+	if s.resendTimer.isUninitialized() || !s.resendTimer.checkExpiration() {
 		return nil
 	}
 
@@ -646,12 +672,12 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt
 		//     1. If there exists a smallest unSACKED sequence number
 		//     'S2' that meets the following 3 criteria for determining
 		//     loss, the sequence range of one segment of up to SMSS
-		//     octects starting with S2 MUST be returned.
+		//     octets starting with S2 MUST be returned.
 		if !s.ep.scoreboard.IsSACKED(header.SACKBlock{Start: segSeq, End: segSeq.Add(1)}) {
 			// NextSeg():
 			//
 			//    (1.a) S2 is greater than HighRxt
-			//    (1.b) S2 is less than highest octect covered by
+			//    (1.b) S2 is less than highest octet covered by
 			//    any received SACK.
 			if s.FastRecovery.HighRxt.LessThan(segSeq) && segSeq.LessThan(s.ep.scoreboard.maxSACKED) {
 				// NextSeg():
@@ -682,7 +708,7 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt
 			//     retransmission per entry into loss recovery. If
 			//     HighACK is greater than RescueRxt (or RescueRxt
 			//     is undefined), then one segment of upto SMSS
-			//     octects that MUST include the highest outstanding
+			//     octets that MUST include the highest outstanding
 			//     unSACKed sequence number SHOULD be returned, and
 			//     RescueRxt set to RecoveryPoint. HighRxt MUST NOT
 			//     be updated.
@@ -776,10 +802,20 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se
 				}
 				// With TCP_CORK, hold back until minimum of the available
 				// send space and MSS.
-				// TODO(gvisor.dev/issue/2833): Drain the held segments after a
-				// timeout.
-				if seg.payloadSize() < s.MaxPayloadSize && s.ep.ops.GetCorkOption() {
-					return false
+				if s.ep.ops.GetCorkOption() {
+					if seg.payloadSize() < s.MaxPayloadSize {
+						if !s.startCork {
+							s.startCork = true
+							// Enable the timer for
+							// 200ms, after which
+							// the segments are drained.
+							s.corkTimer.enable(MinRTO)
+						}
+						return false
+					}
+					// Disable the TCP_CORK timer.
+					s.startCork = false
+					s.corkTimer.disable()
 				}
 			}
 		}
@@ -822,7 +858,7 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se
 		}
 
 		// If the whole segment or at least 1MSS sized segment cannot
-		// be accomodated in the receiver advertized window, skip
+		// be accommodated in the receiver advertised window, skip
 		// splitting and sending of the segment. ref:
 		// net/ipv4/tcp_output.c::tcp_snd_wnd_test()
 		//
@@ -886,12 +922,25 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se
 	return true
 }
 
+// zeroProbeJunk is data sent during zero window probes. Its value is
+// irrelevant; since the sequence number has already been acknowledged it will
+// be discarded. It's only here to avoid allocating.
+var zeroProbeJunk = []byte{0}
+
 // +checklocks:s.ep.mu
 func (s *sender) sendZeroWindowProbe() {
 	s.unackZeroWindowProbes++
-	// Send a zero window probe with sequence number pointing to
-	// the last acknowledged byte.
-	s.sendEmptySegment(header.TCPFlagAck, s.SndUna-1)
+
+	// Send a zero window probe with sequence number pointing to the last
+	// acknowledged byte. Note that, like Linux, this isn't quite what RFC
+	// 9293 3.8.6.1 describes: we don't send the next byte in the stream,
+	// we re-send an ACKed byte to goad the receiver into responding.
+	pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
+		Payload: buffer.MakeWithData(zeroProbeJunk),
+	})
+	defer pkt.DecRef()
+	s.sendSegmentFromPacketBuffer(pkt, header.TCPFlagAck, s.SndUna-1)
+
 	// Rearm the timer to continue probing.
 	s.resendTimer.enable(s.RTO)
 }
@@ -921,7 +970,7 @@ func (s *sender) postXmit(dataSent bool, shouldScheduleProbe bool) {
 		s.ep.disableKeepaliveTimer()
 	}
 
-	// If the sender has advertized zero receive window and we have
+	// If the sender has advertised zero receive window and we have
 	// data to be sent out, start zero window probing to query the
 	// remote for its receive window size.
 	if s.writeNext != nil && s.SndWnd == 0 {
@@ -953,7 +1002,7 @@ func (s *sender) postXmit(dataSent bool, shouldScheduleProbe bool) {
 func (s *sender) sendData() {
 	limit := s.MaxPayloadSize
 	if s.gso {
-		limit = int(s.ep.gso.MaxSize - header.TCPHeaderMaximumSize)
+		limit = int(s.ep.gso.MaxSize - header.TCPTotalHeaderMaximumSize - 1)
 	}
 	end := s.SndUna.Add(s.SndWnd)
 
@@ -1091,7 +1140,7 @@ func (s *sender) SetPipe() {
 			//
 			// NOTE: here we mark the whole segment as lost. We do not try
 			// and test every byte in our write buffer as we maintain our
-			// pipe in terms of oustanding packets and not bytes.
+			// pipe in terms of outstanding packets and not bytes.
 			if !s.ep.scoreboard.IsRangeLost(sb) {
 				pipe++
 			}
@@ -1379,9 +1428,12 @@ func (s *sender) inRecovery() bool {
 // +checklocks:s.ep.mu
 // +checklocksalias:s.rc.snd.ep.mu=s.ep.mu
 func (s *sender) handleRcvdSegment(rcvdSeg *segment) {
+	bestRTT := unknownRTT
+
 	// Check if we can extract an RTT measurement from this ack.
 	if !rcvdSeg.parsedOptions.TS && s.RTTMeasureSeqNum.LessThan(rcvdSeg.ackNumber) {
-		s.updateRTO(s.ep.stack.Clock().NowMonotonic().Sub(s.RTTMeasureTime))
+		bestRTT = s.ep.stack.Clock().NowMonotonic().Sub(s.RTTMeasureTime)
+		s.updateRTO(bestRTT)
 		s.RTTMeasureSeqNum = s.SndNxt
 	}
 
@@ -1453,7 +1505,7 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {
 	// Stash away the current window size.
 	s.SndWnd = rcvdSeg.window
 
-	// Disable zero window probing if remote advertizes a non-zero receive
+	// Disable zero window probing if remote advertises a non-zero receive
 	// window. This can be with an ACK to the zero window probe (where the
 	// acknumber refers to the already acknowledged byte) OR to any previously
 	// unacknowledged segment.
@@ -1483,7 +1535,14 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {
 		//    some new data, i.e., only if it advances the left edge of
 		//    the send window.
 		if s.ep.SendTSOk && rcvdSeg.parsedOptions.TSEcr != 0 {
-			s.updateRTO(s.ep.elapsed(s.ep.stack.Clock().NowMonotonic(), rcvdSeg.parsedOptions.TSEcr))
+			tsRTT := s.ep.elapsed(s.ep.stack.Clock().NowMonotonic(), rcvdSeg.parsedOptions.TSEcr)
+			s.updateRTO(tsRTT)
+			// Following Linux, prefer RTT computed from ACKs to TSEcr because,
+			// "broken middle-boxes or peers may corrupt TS-ECR fields"
+			// https://github.com/torvalds/linux/blob/39cd87c4eb2b893354f3b850f916353f2658ae6f/net/ipv4/tcp_input.c#L3141C1-L3144C24
+			if bestRTT == unknownRTT {
+				bestRTT = tsRTT
+			}
 		}
 
 		if s.shouldSchedulePTO() {
@@ -1507,8 +1566,11 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {
 			// segments (which are always at the end of list) that
 			// have no data, but do consume a sequence number.
 			seg := s.writeList.Front()
-			datalen := seg.logicalLen()
+			if seg == nil {
+				panic(fmt.Sprintf("invalid state: there are %d unacknowledged bytes left, but the write list is empty:\n%+v", ackLeft, s.TCPSenderState))
+			}
 
+			datalen := seg.logicalLen()
 			if datalen > ackLeft {
 				prevCount := s.pCount(seg, s.MaxPayloadSize)
 				seg.TrimFront(ackLeft)
@@ -1552,7 +1614,7 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {
 		// If we are not in fast recovery then update the congestion
 		// window based on the number of acknowledged packets.
 		if !s.FastRecovery.Active {
-			s.cc.Update(originalOutstanding - s.Outstanding)
+			s.cc.Update(originalOutstanding-s.Outstanding, bestRTT)
 			if s.FastRecovery.Last.LessThan(s.SndUna) {
 				s.state = tcpip.Open
 				// Update RACK when we are exiting fast or RTO
@@ -1668,7 +1730,7 @@ func (s *sender) sendSegment(seg *segment) tcpip.Error {
 // flags and sequence number.
 // +checklocks:s.ep.mu
 // +checklocksalias:s.ep.rcv.ep.mu=s.ep.mu
-func (s *sender) sendSegmentFromPacketBuffer(pkt stack.PacketBufferPtr, flags header.TCPFlags, seq seqnum.Value) tcpip.Error {
+func (s *sender) sendSegmentFromPacketBuffer(pkt *stack.PacketBuffer, flags header.TCPFlags, seq seqnum.Value) tcpip.Error {
 	s.LastSendTime = s.ep.stack.Clock().NowMonotonic()
 	if seq == s.RTTMeasureSeqNum {
 		s.RTTMeasureTime = s.LastSendTime
@@ -1724,3 +1786,24 @@ func (s *sender) updateWriteNext(seg *segment) {
 	}
 	s.writeNext = seg
 }
+
+// corkTimerExpired drains all the segments when TCP_CORK is enabled.
+// +checklocks:s.ep.mu
+func (s *sender) corkTimerExpired() tcpip.Error {
+	// Check if the timer actually expired or if it's a spurious wake due
+	// to a previously orphaned runtime timer.
+	if s.corkTimer.isUninitialized() || !s.corkTimer.checkExpiration() {
+		return nil
+	}
+
+	// Assign sequence number and flags to the segment.
+	seg := s.writeNext
+	if seg == nil {
+		return nil
+	}
+	seg.sequenceNumber = s.SndNxt
+	seg.flags = header.TCPFlagAck | header.TCPFlagPsh
+	// Drain all the segments.
+	s.sendData()
+	return nil
+}
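
From an application's point of view, the cork timer means corked writes are no longer held indefinitely. A rough sketch of the usage pattern, assuming a connected tcpip.Endpoint and gVisor's SocketOptions cork accessors (corkedWrite itself is a hypothetical helper, not part of the patch):

package sketch

import (
	"bytes"

	"gvisor.dev/gvisor/pkg/tcpip"
)

// corkedWrite holds back sub-MSS writes via TCP_CORK; the cork timer added
// above (armed for MinRTO) eventually drains whatever is still queued even if
// the application forgets to uncork. ep is assumed to be already connected.
func corkedWrite(ep tcpip.Endpoint, chunks [][]byte) tcpip.Error {
	ep.SocketOptions().SetCorkOption(true)
	defer ep.SocketOptions().SetCorkOption(false) // uncork: flush the remainder now
	for _, c := range chunks {
		if _, err := ep.Write(bytes.NewReader(c), tcpip.WriteOptions{}); err != nil {
			return err
		}
	}
	return nil
}
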
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_endpoint_list.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_endpoint_list.go
index 3129cf2b..67bfa999 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_endpoint_list.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_endpoint_list.go
@@ -13,7 +13,7 @@ type endpointElementMapper struct{}
 // This default implementation should be inlined.
 //
 //go:nosplit
-func (endpointElementMapper) linkerFor(elem *endpoint) *endpoint { return elem }
+func (endpointElementMapper) linkerFor(elem *Endpoint) *Endpoint { return elem }
 
 // List is an intrusive list. Entries can be added to or removed from the list
 // in O(1) time and with no additional memory allocations.
@@ -28,8 +28,8 @@ func (endpointElementMapper) linkerFor(elem *endpoint) *endpoint { return elem }
 //
 // +stateify savable
 type endpointList struct {
-	head *endpoint
-	tail *endpoint
+	head *Endpoint
+	tail *Endpoint
 }
 
 // Reset resets list l to the empty state.
@@ -48,14 +48,14 @@ func (l *endpointList) Empty() bool {
 // Front returns the first element of list l or nil.
 //
 //go:nosplit
-func (l *endpointList) Front() *endpoint {
+func (l *endpointList) Front() *Endpoint {
 	return l.head
 }
 
 // Back returns the last element of list l or nil.
 //
 //go:nosplit
-func (l *endpointList) Back() *endpoint {
+func (l *endpointList) Back() *Endpoint {
 	return l.tail
 }
 
@@ -74,7 +74,7 @@ func (l *endpointList) Len() (count int) {
 // PushFront inserts the element e at the front of list l.
 //
 //go:nosplit
-func (l *endpointList) PushFront(e *endpoint) {
+func (l *endpointList) PushFront(e *Endpoint) {
 	linker := endpointElementMapper{}.linkerFor(e)
 	linker.SetNext(l.head)
 	linker.SetPrev(nil)
@@ -107,7 +107,7 @@ func (l *endpointList) PushFrontList(m *endpointList) {
 // PushBack inserts the element e at the back of list l.
 //
 //go:nosplit
-func (l *endpointList) PushBack(e *endpoint) {
+func (l *endpointList) PushBack(e *Endpoint) {
 	linker := endpointElementMapper{}.linkerFor(e)
 	linker.SetNext(nil)
 	linker.SetPrev(l.tail)
@@ -140,7 +140,7 @@ func (l *endpointList) PushBackList(m *endpointList) {
 // InsertAfter inserts e after b.
 //
 //go:nosplit
-func (l *endpointList) InsertAfter(b, e *endpoint) {
+func (l *endpointList) InsertAfter(b, e *Endpoint) {
 	bLinker := endpointElementMapper{}.linkerFor(b)
 	eLinker := endpointElementMapper{}.linkerFor(e)
 
@@ -160,7 +160,7 @@ func (l *endpointList) InsertAfter(b, e *endpoint) {
 // InsertBefore inserts e before a.
 //
 //go:nosplit
-func (l *endpointList) InsertBefore(a, e *endpoint) {
+func (l *endpointList) InsertBefore(a, e *Endpoint) {
 	aLinker := endpointElementMapper{}.linkerFor(a)
 	eLinker := endpointElementMapper{}.linkerFor(e)
 
@@ -179,7 +179,7 @@ func (l *endpointList) InsertBefore(a, e *endpoint) {
 // Remove removes e from l.
 //
 //go:nosplit
-func (l *endpointList) Remove(e *endpoint) {
+func (l *endpointList) Remove(e *Endpoint) {
 	linker := endpointElementMapper{}.linkerFor(e)
 	prev := linker.Prev()
 	next := linker.Next()
@@ -206,34 +206,34 @@ func (l *endpointList) Remove(e *endpoint) {
 //
 // +stateify savable
 type endpointEntry struct {
-	next *endpoint
-	prev *endpoint
+	next *Endpoint
+	prev *Endpoint
 }
 
 // Next returns the entry that follows e in the list.
 //
 //go:nosplit
-func (e *endpointEntry) Next() *endpoint {
+func (e *endpointEntry) Next() *Endpoint {
 	return e.next
 }
 
 // Prev returns the entry that precedes e in the list.
 //
 //go:nosplit
-func (e *endpointEntry) Prev() *endpoint {
+func (e *endpointEntry) Prev() *Endpoint {
 	return e.prev
 }
 
 // SetNext assigns 'entry' as the entry that follows e in the list.
 //
 //go:nosplit
-func (e *endpointEntry) SetNext(elem *endpoint) {
+func (e *endpointEntry) SetNext(elem *Endpoint) {
 	e.next = elem
 }
 
 // SetPrev assigns 'entry' as the entry that precedes e in the list.
 //
 //go:nosplit
-func (e *endpointEntry) SetPrev(elem *endpoint) {
+func (e *endpointEntry) SetPrev(elem *Endpoint) {
 	e.prev = elem
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go
index e40251b2..a06b3f35 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go
@@ -1,6 +1,7 @@
 package tcp
 
 import (
+	"context"
 	"fmt"
 
 	"gvisor.dev/gvisor/pkg/atomicbitops"
@@ -134,7 +135,7 @@ func (r *segmentRefs) DecRef(destroy func()) {
 	}
 }
 
-func (r *segmentRefs) afterLoad() {
+func (r *segmentRefs) afterLoad(context.Context) {
 	if r.ReadRefs() > 0 {
 		refs.Register(r)
 	}
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go
index 1f38a830..7bfef39e 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go
@@ -3,6 +3,8 @@
 package tcp
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -23,20 +25,20 @@ func (a *acceptQueue) beforeSave() {}
 // +checklocksignore
 func (a *acceptQueue) StateSave(stateSinkObject state.Sink) {
 	a.beforeSave()
-	var endpointsValue []*endpoint
+	var endpointsValue []*Endpoint
 	endpointsValue = a.saveEndpoints()
 	stateSinkObject.SaveValue(0, endpointsValue)
 	stateSinkObject.Save(1, &a.pendingEndpoints)
 	stateSinkObject.Save(2, &a.capacity)
 }
 
-func (a *acceptQueue) afterLoad() {}
+func (a *acceptQueue) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (a *acceptQueue) StateLoad(stateSourceObject state.Source) {
+func (a *acceptQueue) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(1, &a.pendingEndpoints)
 	stateSourceObject.Load(2, &a.capacity)
-	stateSourceObject.LoadValue(0, new([]*endpoint), func(y any) { a.loadEndpoints(y.([]*endpoint)) })
+	stateSourceObject.LoadValue(0, new([]*Endpoint), func(y any) { a.loadEndpoints(ctx, y.([]*Endpoint)) })
 }
 
 func (h *handshake) StateTypeName() string {
@@ -89,10 +91,10 @@ func (h *handshake) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(16, &h.sampleRTTWithTSOnly)
 }
 
-func (h *handshake) afterLoad() {}
+func (h *handshake) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (h *handshake) StateLoad(stateSourceObject state.Source) {
+func (h *handshake) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &h.ep)
 	stateSourceObject.Load(1, &h.listenEP)
 	stateSourceObject.Load(2, &h.state)
@@ -134,15 +136,127 @@ func (c *cubicState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &c.s)
 }
 
-func (c *cubicState) afterLoad() {}
+func (c *cubicState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (c *cubicState) StateLoad(stateSourceObject state.Source) {
+func (c *cubicState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &c.TCPCubicState)
 	stateSourceObject.Load(1, &c.numCongestionEvents)
 	stateSourceObject.Load(2, &c.s)
 }
 
+func (q *epQueue) StateTypeName() string {
+	return "pkg/tcpip/transport/tcp.epQueue"
+}
+
+func (q *epQueue) StateFields() []string {
+	return []string{
+		"list",
+	}
+}
+
+func (q *epQueue) beforeSave() {}
+
+// +checklocksignore
+func (q *epQueue) StateSave(stateSinkObject state.Sink) {
+	q.beforeSave()
+	stateSinkObject.Save(0, &q.list)
+}
+
+func (q *epQueue) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (q *epQueue) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &q.list)
+}
+
+func (p *processor) StateTypeName() string {
+	return "pkg/tcpip/transport/tcp.processor"
+}
+
+func (p *processor) StateFields() []string {
+	return []string{
+		"epQ",
+		"sleeper",
+	}
+}
+
+func (p *processor) beforeSave() {}
+
+// +checklocksignore
+func (p *processor) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.epQ)
+	stateSinkObject.Save(1, &p.sleeper)
+}
+
+func (p *processor) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *processor) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.epQ)
+	stateSourceObject.Load(1, &p.sleeper)
+}
+
+func (d *dispatcher) StateTypeName() string {
+	return "pkg/tcpip/transport/tcp.dispatcher"
+}
+
+func (d *dispatcher) StateFields() []string {
+	return []string{
+		"processors",
+		"hasher",
+		"paused",
+		"closed",
+	}
+}
+
+func (d *dispatcher) beforeSave() {}
+
+// +checklocksignore
+func (d *dispatcher) StateSave(stateSinkObject state.Sink) {
+	d.beforeSave()
+	stateSinkObject.Save(0, &d.processors)
+	stateSinkObject.Save(1, &d.hasher)
+	stateSinkObject.Save(2, &d.paused)
+	stateSinkObject.Save(3, &d.closed)
+}
+
+func (d *dispatcher) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (d *dispatcher) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &d.processors)
+	stateSourceObject.Load(1, &d.hasher)
+	stateSourceObject.Load(2, &d.paused)
+	stateSourceObject.Load(3, &d.closed)
+}
+
+func (j *jenkinsHasher) StateTypeName() string {
+	return "pkg/tcpip/transport/tcp.jenkinsHasher"
+}
+
+func (j *jenkinsHasher) StateFields() []string {
+	return []string{
+		"seed",
+	}
+}
+
+func (j *jenkinsHasher) beforeSave() {}
+
+// +checklocksignore
+func (j *jenkinsHasher) StateSave(stateSinkObject state.Sink) {
+	j.beforeSave()
+	stateSinkObject.Save(0, &j.seed)
+}
+
+func (j *jenkinsHasher) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (j *jenkinsHasher) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &j.seed)
+}
+
 func (s *SACKInfo) StateTypeName() string {
 	return "pkg/tcpip/transport/tcp.SACKInfo"
 }
@@ -163,10 +277,10 @@ func (s *SACKInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &s.NumBlocks)
 }
 
-func (s *SACKInfo) afterLoad() {}
+func (s *SACKInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *SACKInfo) StateLoad(stateSourceObject state.Source) {
+func (s *SACKInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &s.Blocks)
 	stateSourceObject.Load(1, &s.NumBlocks)
 }
@@ -201,10 +315,10 @@ func (r *ReceiveErrors) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(6, &r.WantZeroRcvWindow)
 }
 
-func (r *ReceiveErrors) afterLoad() {}
+func (r *ReceiveErrors) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *ReceiveErrors) StateLoad(stateSourceObject state.Source) {
+func (r *ReceiveErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.ReceiveErrors)
 	stateSourceObject.Load(1, &r.SegmentQueueDropped)
 	stateSourceObject.Load(2, &r.ChecksumErrors)
@@ -242,10 +356,10 @@ func (s *SendErrors) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(5, &s.Timeouts)
 }
 
-func (s *SendErrors) afterLoad() {}
+func (s *SendErrors) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *SendErrors) StateLoad(stateSourceObject state.Source) {
+func (s *SendErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &s.SendErrors)
 	stateSourceObject.Load(1, &s.SegmentSendToNetworkFailed)
 	stateSourceObject.Load(2, &s.SynSendToNetworkFailed)
@@ -284,10 +398,10 @@ func (s *Stats) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(6, &s.WriteErrors)
 }
 
-func (s *Stats) afterLoad() {}
+func (s *Stats) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *Stats) StateLoad(stateSourceObject state.Source) {
+func (s *Stats) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &s.SegmentsReceived)
 	stateSourceObject.Load(1, &s.SegmentsSent)
 	stateSourceObject.Load(2, &s.FailedConnectionAttempts)
@@ -315,24 +429,23 @@ func (sq *sndQueueInfo) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &sq.TCPSndBufState)
 }
 
-func (sq *sndQueueInfo) afterLoad() {}
+func (sq *sndQueueInfo) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (sq *sndQueueInfo) StateLoad(stateSourceObject state.Source) {
+func (sq *sndQueueInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &sq.TCPSndBufState)
 }
 
-func (e *endpoint) StateTypeName() string {
-	return "pkg/tcpip/transport/tcp.endpoint"
+func (e *Endpoint) StateTypeName() string {
+	return "pkg/tcpip/transport/tcp.Endpoint"
 }
 
-func (e *endpoint) StateFields() []string {
+func (e *Endpoint) StateFields() []string {
 	return []string{
 		"TCPEndpointStateInner",
 		"TransportEndpointInfo",
 		"DefaultSocketOptionsHandler",
 		"waiterQueue",
-		"uniqueID",
 		"hardError",
 		"lastError",
 		"TCPRcvBufState",
@@ -380,123 +493,124 @@ func (e *endpoint) StateFields() []string {
 		"owner",
 		"ops",
 		"lastOutOfWindowAckTime",
+		"pmtud",
 	}
 }
 
 // +checklocksignore
-func (e *endpoint) StateSave(stateSinkObject state.Sink) {
+func (e *Endpoint) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 	var stateValue EndpointState
 	stateValue = e.saveState()
-	stateSinkObject.SaveValue(11, stateValue)
+	stateSinkObject.SaveValue(10, stateValue)
 	stateSinkObject.Save(0, &e.TCPEndpointStateInner)
 	stateSinkObject.Save(1, &e.TransportEndpointInfo)
 	stateSinkObject.Save(2, &e.DefaultSocketOptionsHandler)
 	stateSinkObject.Save(3, &e.waiterQueue)
-	stateSinkObject.Save(4, &e.uniqueID)
-	stateSinkObject.Save(5, &e.hardError)
-	stateSinkObject.Save(6, &e.lastError)
-	stateSinkObject.Save(7, &e.TCPRcvBufState)
-	stateSinkObject.Save(8, &e.rcvMemUsed)
-	stateSinkObject.Save(9, &e.ownedByUser)
-	stateSinkObject.Save(10, &e.rcvQueue)
-	stateSinkObject.Save(12, &e.connectionDirectionState)
-	stateSinkObject.Save(13, &e.boundNICID)
-	stateSinkObject.Save(14, &e.ipv4TTL)
-	stateSinkObject.Save(15, &e.ipv6HopLimit)
-	stateSinkObject.Save(16, &e.isConnectNotified)
-	stateSinkObject.Save(17, &e.h)
-	stateSinkObject.Save(18, &e.portFlags)
-	stateSinkObject.Save(19, &e.boundBindToDevice)
-	stateSinkObject.Save(20, &e.boundPortFlags)
-	stateSinkObject.Save(21, &e.boundDest)
-	stateSinkObject.Save(22, &e.effectiveNetProtos)
-	stateSinkObject.Save(23, &e.recentTSTime)
-	stateSinkObject.Save(24, &e.shutdownFlags)
-	stateSinkObject.Save(25, &e.tcpRecovery)
-	stateSinkObject.Save(26, &e.sack)
-	stateSinkObject.Save(27, &e.delay)
-	stateSinkObject.Save(28, &e.scoreboard)
-	stateSinkObject.Save(29, &e.segmentQueue)
-	stateSinkObject.Save(30, &e.userMSS)
-	stateSinkObject.Save(31, &e.maxSynRetries)
-	stateSinkObject.Save(32, &e.windowClamp)
-	stateSinkObject.Save(33, &e.sndQueueInfo)
-	stateSinkObject.Save(34, &e.cc)
-	stateSinkObject.Save(35, &e.keepalive)
-	stateSinkObject.Save(36, &e.userTimeout)
-	stateSinkObject.Save(37, &e.deferAccept)
-	stateSinkObject.Save(38, &e.acceptQueue)
-	stateSinkObject.Save(39, &e.rcv)
-	stateSinkObject.Save(40, &e.snd)
-	stateSinkObject.Save(41, &e.connectingAddress)
-	stateSinkObject.Save(42, &e.amss)
-	stateSinkObject.Save(43, &e.sendTOS)
-	stateSinkObject.Save(44, &e.gso)
-	stateSinkObject.Save(45, &e.stats)
-	stateSinkObject.Save(46, &e.tcpLingerTimeout)
-	stateSinkObject.Save(47, &e.closed)
-	stateSinkObject.Save(48, &e.txHash)
-	stateSinkObject.Save(49, &e.owner)
-	stateSinkObject.Save(50, &e.ops)
-	stateSinkObject.Save(51, &e.lastOutOfWindowAckTime)
+	stateSinkObject.Save(4, &e.hardError)
+	stateSinkObject.Save(5, &e.lastError)
+	stateSinkObject.Save(6, &e.TCPRcvBufState)
+	stateSinkObject.Save(7, &e.rcvMemUsed)
+	stateSinkObject.Save(8, &e.ownedByUser)
+	stateSinkObject.Save(9, &e.rcvQueue)
+	stateSinkObject.Save(11, &e.connectionDirectionState)
+	stateSinkObject.Save(12, &e.boundNICID)
+	stateSinkObject.Save(13, &e.ipv4TTL)
+	stateSinkObject.Save(14, &e.ipv6HopLimit)
+	stateSinkObject.Save(15, &e.isConnectNotified)
+	stateSinkObject.Save(16, &e.h)
+	stateSinkObject.Save(17, &e.portFlags)
+	stateSinkObject.Save(18, &e.boundBindToDevice)
+	stateSinkObject.Save(19, &e.boundPortFlags)
+	stateSinkObject.Save(20, &e.boundDest)
+	stateSinkObject.Save(21, &e.effectiveNetProtos)
+	stateSinkObject.Save(22, &e.recentTSTime)
+	stateSinkObject.Save(23, &e.shutdownFlags)
+	stateSinkObject.Save(24, &e.tcpRecovery)
+	stateSinkObject.Save(25, &e.sack)
+	stateSinkObject.Save(26, &e.delay)
+	stateSinkObject.Save(27, &e.scoreboard)
+	stateSinkObject.Save(28, &e.segmentQueue)
+	stateSinkObject.Save(29, &e.userMSS)
+	stateSinkObject.Save(30, &e.maxSynRetries)
+	stateSinkObject.Save(31, &e.windowClamp)
+	stateSinkObject.Save(32, &e.sndQueueInfo)
+	stateSinkObject.Save(33, &e.cc)
+	stateSinkObject.Save(34, &e.keepalive)
+	stateSinkObject.Save(35, &e.userTimeout)
+	stateSinkObject.Save(36, &e.deferAccept)
+	stateSinkObject.Save(37, &e.acceptQueue)
+	stateSinkObject.Save(38, &e.rcv)
+	stateSinkObject.Save(39, &e.snd)
+	stateSinkObject.Save(40, &e.connectingAddress)
+	stateSinkObject.Save(41, &e.amss)
+	stateSinkObject.Save(42, &e.sendTOS)
+	stateSinkObject.Save(43, &e.gso)
+	stateSinkObject.Save(44, &e.stats)
+	stateSinkObject.Save(45, &e.tcpLingerTimeout)
+	stateSinkObject.Save(46, &e.closed)
+	stateSinkObject.Save(47, &e.txHash)
+	stateSinkObject.Save(48, &e.owner)
+	stateSinkObject.Save(49, &e.ops)
+	stateSinkObject.Save(50, &e.lastOutOfWindowAckTime)
+	stateSinkObject.Save(51, &e.pmtud)
 }
 
 // +checklocksignore
-func (e *endpoint) StateLoad(stateSourceObject state.Source) {
+func (e *Endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.TCPEndpointStateInner)
 	stateSourceObject.Load(1, &e.TransportEndpointInfo)
 	stateSourceObject.Load(2, &e.DefaultSocketOptionsHandler)
 	stateSourceObject.LoadWait(3, &e.waiterQueue)
-	stateSourceObject.Load(4, &e.uniqueID)
-	stateSourceObject.Load(5, &e.hardError)
-	stateSourceObject.Load(6, &e.lastError)
-	stateSourceObject.Load(7, &e.TCPRcvBufState)
-	stateSourceObject.Load(8, &e.rcvMemUsed)
-	stateSourceObject.Load(9, &e.ownedByUser)
-	stateSourceObject.LoadWait(10, &e.rcvQueue)
-	stateSourceObject.Load(12, &e.connectionDirectionState)
-	stateSourceObject.Load(13, &e.boundNICID)
-	stateSourceObject.Load(14, &e.ipv4TTL)
-	stateSourceObject.Load(15, &e.ipv6HopLimit)
-	stateSourceObject.Load(16, &e.isConnectNotified)
-	stateSourceObject.Load(17, &e.h)
-	stateSourceObject.Load(18, &e.portFlags)
-	stateSourceObject.Load(19, &e.boundBindToDevice)
-	stateSourceObject.Load(20, &e.boundPortFlags)
-	stateSourceObject.Load(21, &e.boundDest)
-	stateSourceObject.Load(22, &e.effectiveNetProtos)
-	stateSourceObject.Load(23, &e.recentTSTime)
-	stateSourceObject.Load(24, &e.shutdownFlags)
-	stateSourceObject.Load(25, &e.tcpRecovery)
-	stateSourceObject.Load(26, &e.sack)
-	stateSourceObject.Load(27, &e.delay)
-	stateSourceObject.Load(28, &e.scoreboard)
-	stateSourceObject.LoadWait(29, &e.segmentQueue)
-	stateSourceObject.Load(30, &e.userMSS)
-	stateSourceObject.Load(31, &e.maxSynRetries)
-	stateSourceObject.Load(32, &e.windowClamp)
-	stateSourceObject.Load(33, &e.sndQueueInfo)
-	stateSourceObject.Load(34, &e.cc)
-	stateSourceObject.Load(35, &e.keepalive)
-	stateSourceObject.Load(36, &e.userTimeout)
-	stateSourceObject.Load(37, &e.deferAccept)
-	stateSourceObject.Load(38, &e.acceptQueue)
-	stateSourceObject.LoadWait(39, &e.rcv)
-	stateSourceObject.LoadWait(40, &e.snd)
-	stateSourceObject.Load(41, &e.connectingAddress)
-	stateSourceObject.Load(42, &e.amss)
-	stateSourceObject.Load(43, &e.sendTOS)
-	stateSourceObject.Load(44, &e.gso)
-	stateSourceObject.Load(45, &e.stats)
-	stateSourceObject.Load(46, &e.tcpLingerTimeout)
-	stateSourceObject.Load(47, &e.closed)
-	stateSourceObject.Load(48, &e.txHash)
-	stateSourceObject.Load(49, &e.owner)
-	stateSourceObject.Load(50, &e.ops)
-	stateSourceObject.Load(51, &e.lastOutOfWindowAckTime)
-	stateSourceObject.LoadValue(11, new(EndpointState), func(y any) { e.loadState(y.(EndpointState)) })
-	stateSourceObject.AfterLoad(e.afterLoad)
+	stateSourceObject.Load(4, &e.hardError)
+	stateSourceObject.Load(5, &e.lastError)
+	stateSourceObject.Load(6, &e.TCPRcvBufState)
+	stateSourceObject.Load(7, &e.rcvMemUsed)
+	stateSourceObject.Load(8, &e.ownedByUser)
+	stateSourceObject.LoadWait(9, &e.rcvQueue)
+	stateSourceObject.Load(11, &e.connectionDirectionState)
+	stateSourceObject.Load(12, &e.boundNICID)
+	stateSourceObject.Load(13, &e.ipv4TTL)
+	stateSourceObject.Load(14, &e.ipv6HopLimit)
+	stateSourceObject.Load(15, &e.isConnectNotified)
+	stateSourceObject.Load(16, &e.h)
+	stateSourceObject.Load(17, &e.portFlags)
+	stateSourceObject.Load(18, &e.boundBindToDevice)
+	stateSourceObject.Load(19, &e.boundPortFlags)
+	stateSourceObject.Load(20, &e.boundDest)
+	stateSourceObject.Load(21, &e.effectiveNetProtos)
+	stateSourceObject.Load(22, &e.recentTSTime)
+	stateSourceObject.Load(23, &e.shutdownFlags)
+	stateSourceObject.Load(24, &e.tcpRecovery)
+	stateSourceObject.Load(25, &e.sack)
+	stateSourceObject.Load(26, &e.delay)
+	stateSourceObject.Load(27, &e.scoreboard)
+	stateSourceObject.LoadWait(28, &e.segmentQueue)
+	stateSourceObject.Load(29, &e.userMSS)
+	stateSourceObject.Load(30, &e.maxSynRetries)
+	stateSourceObject.Load(31, &e.windowClamp)
+	stateSourceObject.Load(32, &e.sndQueueInfo)
+	stateSourceObject.Load(33, &e.cc)
+	stateSourceObject.Load(34, &e.keepalive)
+	stateSourceObject.Load(35, &e.userTimeout)
+	stateSourceObject.Load(36, &e.deferAccept)
+	stateSourceObject.Load(37, &e.acceptQueue)
+	stateSourceObject.LoadWait(38, &e.rcv)
+	stateSourceObject.LoadWait(39, &e.snd)
+	stateSourceObject.Load(40, &e.connectingAddress)
+	stateSourceObject.Load(41, &e.amss)
+	stateSourceObject.Load(42, &e.sendTOS)
+	stateSourceObject.Load(43, &e.gso)
+	stateSourceObject.Load(44, &e.stats)
+	stateSourceObject.Load(45, &e.tcpLingerTimeout)
+	stateSourceObject.Load(46, &e.closed)
+	stateSourceObject.Load(47, &e.txHash)
+	stateSourceObject.Load(48, &e.owner)
+	stateSourceObject.Load(49, &e.ops)
+	stateSourceObject.Load(50, &e.lastOutOfWindowAckTime)
+	stateSourceObject.Load(51, &e.pmtud)
+	stateSourceObject.LoadValue(10, new(EndpointState), func(y any) { e.loadState(ctx, y.(EndpointState)) })
+	stateSourceObject.AfterLoad(func() { e.afterLoad(ctx) })
 }
 
 func (k *keepalive) StateTypeName() string {
@@ -523,16 +637,98 @@ func (k *keepalive) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(3, &k.unacked)
 }
 
-func (k *keepalive) afterLoad() {}
+func (k *keepalive) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (k *keepalive) StateLoad(stateSourceObject state.Source) {
+func (k *keepalive) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &k.idle)
 	stateSourceObject.Load(1, &k.interval)
 	stateSourceObject.Load(2, &k.count)
 	stateSourceObject.Load(3, &k.unacked)
 }
 
+func (p *protocol) StateTypeName() string {
+	return "pkg/tcpip/transport/tcp.protocol"
+}
+
+func (p *protocol) StateFields() []string {
+	return []string{
+		"stack",
+		"sackEnabled",
+		"recovery",
+		"delayEnabled",
+		"alwaysUseSynCookies",
+		"sendBufferSize",
+		"recvBufferSize",
+		"congestionControl",
+		"availableCongestionControl",
+		"moderateReceiveBuffer",
+		"lingerTimeout",
+		"timeWaitTimeout",
+		"timeWaitReuse",
+		"minRTO",
+		"maxRTO",
+		"maxRetries",
+		"synRetries",
+		"dispatcher",
+		"seqnumSecret",
+		"tsOffsetSecret",
+	}
+}
+
+func (p *protocol) beforeSave() {}
+
+// +checklocksignore
+func (p *protocol) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.stack)
+	stateSinkObject.Save(1, &p.sackEnabled)
+	stateSinkObject.Save(2, &p.recovery)
+	stateSinkObject.Save(3, &p.delayEnabled)
+	stateSinkObject.Save(4, &p.alwaysUseSynCookies)
+	stateSinkObject.Save(5, &p.sendBufferSize)
+	stateSinkObject.Save(6, &p.recvBufferSize)
+	stateSinkObject.Save(7, &p.congestionControl)
+	stateSinkObject.Save(8, &p.availableCongestionControl)
+	stateSinkObject.Save(9, &p.moderateReceiveBuffer)
+	stateSinkObject.Save(10, &p.lingerTimeout)
+	stateSinkObject.Save(11, &p.timeWaitTimeout)
+	stateSinkObject.Save(12, &p.timeWaitReuse)
+	stateSinkObject.Save(13, &p.minRTO)
+	stateSinkObject.Save(14, &p.maxRTO)
+	stateSinkObject.Save(15, &p.maxRetries)
+	stateSinkObject.Save(16, &p.synRetries)
+	stateSinkObject.Save(17, &p.dispatcher)
+	stateSinkObject.Save(18, &p.seqnumSecret)
+	stateSinkObject.Save(19, &p.tsOffsetSecret)
+}
+
+func (p *protocol) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *protocol) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.stack)
+	stateSourceObject.Load(1, &p.sackEnabled)
+	stateSourceObject.Load(2, &p.recovery)
+	stateSourceObject.Load(3, &p.delayEnabled)
+	stateSourceObject.Load(4, &p.alwaysUseSynCookies)
+	stateSourceObject.Load(5, &p.sendBufferSize)
+	stateSourceObject.Load(6, &p.recvBufferSize)
+	stateSourceObject.Load(7, &p.congestionControl)
+	stateSourceObject.Load(8, &p.availableCongestionControl)
+	stateSourceObject.Load(9, &p.moderateReceiveBuffer)
+	stateSourceObject.Load(10, &p.lingerTimeout)
+	stateSourceObject.Load(11, &p.timeWaitTimeout)
+	stateSourceObject.Load(12, &p.timeWaitReuse)
+	stateSourceObject.Load(13, &p.minRTO)
+	stateSourceObject.Load(14, &p.maxRTO)
+	stateSourceObject.Load(15, &p.maxRetries)
+	stateSourceObject.Load(16, &p.synRetries)
+	stateSourceObject.Load(17, &p.dispatcher)
+	stateSourceObject.Load(18, &p.seqnumSecret)
+	stateSourceObject.Load(19, &p.tsOffsetSecret)
+}
+
 func (rc *rackControl) StateTypeName() string {
 	return "pkg/tcpip/transport/tcp.rackControl"
 }
@@ -561,10 +757,10 @@ func (rc *rackControl) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(5, &rc.snd)
 }
 
-func (rc *rackControl) afterLoad() {}
+func (rc *rackControl) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (rc *rackControl) StateLoad(stateSourceObject state.Source) {
+func (rc *rackControl) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &rc.TCPRACKState)
 	stateSourceObject.Load(1, &rc.exitedRecovery)
 	stateSourceObject.Load(2, &rc.minRTT)
@@ -605,10 +801,10 @@ func (r *receiver) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(7, &r.lastRcvdAckTime)
 }
 
-func (r *receiver) afterLoad() {}
+func (r *receiver) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *receiver) StateLoad(stateSourceObject state.Source) {
+func (r *receiver) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.TCPReceiverState)
 	stateSourceObject.Load(1, &r.ep)
 	stateSourceObject.Load(2, &r.rcvWnd)
@@ -637,10 +833,10 @@ func (r *renoState) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &r.s)
 }
 
-func (r *renoState) afterLoad() {}
+func (r *renoState) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *renoState) StateLoad(stateSourceObject state.Source) {
+func (r *renoState) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.s)
 }
 
@@ -662,10 +858,10 @@ func (rr *renoRecovery) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &rr.s)
 }
 
-func (rr *renoRecovery) afterLoad() {}
+func (rr *renoRecovery) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (rr *renoRecovery) StateLoad(stateSourceObject state.Source) {
+func (rr *renoRecovery) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &rr.s)
 }
 
@@ -687,10 +883,10 @@ func (sr *sackRecovery) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &sr.s)
 }
 
-func (sr *sackRecovery) afterLoad() {}
+func (sr *sackRecovery) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (sr *sackRecovery) StateLoad(stateSourceObject state.Source) {
+func (sr *sackRecovery) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &sr.s)
 }
 
@@ -714,10 +910,10 @@ func (s *SACKScoreboard) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &s.maxSACKED)
 }
 
-func (s *SACKScoreboard) afterLoad() {}
+func (s *SACKScoreboard) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *SACKScoreboard) StateLoad(stateSourceObject state.Source) {
+func (s *SACKScoreboard) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &s.smss)
 	stateSourceObject.Load(1, &s.maxSACKED)
 }
@@ -780,10 +976,10 @@ func (s *segment) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(19, &s.lost)
 }
 
-func (s *segment) afterLoad() {}
+func (s *segment) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *segment) StateLoad(stateSourceObject state.Source) {
+func (s *segment) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &s.segmentEntry)
 	stateSourceObject.Load(1, &s.segmentRefs)
 	stateSourceObject.Load(2, &s.ep)
@@ -803,7 +999,7 @@ func (s *segment) StateLoad(stateSourceObject state.Source) {
 	stateSourceObject.Load(17, &s.acked)
 	stateSourceObject.Load(18, &s.dataMemSize)
 	stateSourceObject.Load(19, &s.lost)
-	stateSourceObject.LoadValue(12, new([]byte), func(y any) { s.loadOptions(y.([]byte)) })
+	stateSourceObject.LoadValue(12, new([]byte), func(y any) { s.loadOptions(ctx, y.([]byte)) })
 }
 
 func (q *segmentQueue) StateTypeName() string {
@@ -828,10 +1024,10 @@ func (q *segmentQueue) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &q.frozen)
 }
 
-func (q *segmentQueue) afterLoad() {}
+func (q *segmentQueue) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (q *segmentQueue) StateLoad(stateSourceObject state.Source) {
+func (q *segmentQueue) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.LoadWait(0, &q.list)
 	stateSourceObject.Load(1, &q.ep)
 	stateSourceObject.Load(2, &q.frozen)
@@ -859,6 +1055,7 @@ func (s *sender) StateFields() []string {
 		"rc",
 		"spuriousRecovery",
 		"retransmitTS",
+		"startCork",
 	}
 }
 
@@ -883,12 +1080,13 @@ func (s *sender) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(13, &s.rc)
 	stateSinkObject.Save(14, &s.spuriousRecovery)
 	stateSinkObject.Save(15, &s.retransmitTS)
+	stateSinkObject.Save(16, &s.startCork)
 }
 
-func (s *sender) afterLoad() {}
+func (s *sender) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (s *sender) StateLoad(stateSourceObject state.Source) {
+func (s *sender) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &s.TCPSenderState)
 	stateSourceObject.Load(1, &s.ep)
 	stateSourceObject.Load(2, &s.lr)
@@ -905,6 +1103,7 @@ func (s *sender) StateLoad(stateSourceObject state.Source) {
 	stateSourceObject.Load(13, &s.rc)
 	stateSourceObject.Load(14, &s.spuriousRecovery)
 	stateSourceObject.Load(15, &s.retransmitTS)
+	stateSourceObject.Load(16, &s.startCork)
 }
 
 func (r *rtt) StateTypeName() string {
@@ -925,10 +1124,10 @@ func (r *rtt) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &r.TCPRTTState)
 }
 
-func (r *rtt) afterLoad() {}
+func (r *rtt) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (r *rtt) StateLoad(stateSourceObject state.Source) {
+func (r *rtt) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.TCPRTTState)
 }
 
@@ -952,10 +1151,10 @@ func (l *endpointList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *endpointList) afterLoad() {}
+func (l *endpointList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *endpointList) StateLoad(stateSourceObject state.Source) {
+func (l *endpointList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
@@ -980,10 +1179,10 @@ func (e *endpointEntry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *endpointEntry) afterLoad() {}
+func (e *endpointEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *endpointEntry) StateLoad(stateSourceObject state.Source) {
+func (e *endpointEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
@@ -1008,10 +1207,10 @@ func (l *segmentList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *segmentList) afterLoad() {}
+func (l *segmentList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *segmentList) StateLoad(stateSourceObject state.Source) {
+func (l *segmentList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
@@ -1036,10 +1235,10 @@ func (e *segmentEntry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *segmentEntry) afterLoad() {}
+func (e *segmentEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *segmentEntry) StateLoad(stateSourceObject state.Source) {
+func (e *segmentEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
@@ -1063,22 +1262,27 @@ func (r *segmentRefs) StateSave(stateSinkObject state.Sink) {
 }
 
 // +checklocksignore
-func (r *segmentRefs) StateLoad(stateSourceObject state.Source) {
+func (r *segmentRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &r.refCount)
-	stateSourceObject.AfterLoad(r.afterLoad)
+	stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) })
 }
 
 func init() {
 	state.Register((*acceptQueue)(nil))
 	state.Register((*handshake)(nil))
 	state.Register((*cubicState)(nil))
+	state.Register((*epQueue)(nil))
+	state.Register((*processor)(nil))
+	state.Register((*dispatcher)(nil))
+	state.Register((*jenkinsHasher)(nil))
 	state.Register((*SACKInfo)(nil))
 	state.Register((*ReceiveErrors)(nil))
 	state.Register((*SendErrors)(nil))
 	state.Register((*Stats)(nil))
 	state.Register((*sndQueueInfo)(nil))
-	state.Register((*endpoint)(nil))
+	state.Register((*Endpoint)(nil))
 	state.Register((*keepalive)(nil))
+	state.Register((*protocol)(nil))
 	state.Register((*rackControl)(nil))
 	state.Register((*receiver)(nil))
 	state.Register((*renoState)(nil))
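
For reference, the regenerated files above follow gvisor's updated stateify contract: StateLoad now takes a context.Context, afterLoad hooks accept that same context, and AfterLoad callbacks are therefore registered through closures. A minimal hand-written sketch of the new shape, using a hypothetical type and only the state-package calls visible in this patch:

package example

import (
	"context"

	"gvisor.dev/gvisor/pkg/state"
)

// counter is a hypothetical savable type; the method set mirrors the
// generated code above.
type counter struct {
	n uint64
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"n"} }

func (c *counter) beforeSave() {}

func (c *counter) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.n)
}

// afterLoad now receives the restore context (20240729.0 API).
func (c *counter) afterLoad(context.Context) {}

func (c *counter) StateLoad(ctx context.Context, stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.n)
	stateSourceObject.AfterLoad(func() { c.afterLoad(ctx) })
}

func init() {
	state.Register((*counter)(nil))
}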
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/timer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/timer.go
index 20800926..7111789d 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/timer.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/timer.go
@@ -15,7 +15,6 @@
 package tcp
 
 import (
-	"math"
 	"time"
 
 	"gvisor.dev/gvisor/pkg/tcpip"
@@ -24,8 +23,10 @@ import (
 type timerState int
 
 const (
+	// The timer has not been initialized yet or has been cleaned up.
+	timerUninitialized timerState = iota
 	// The timer is disabled.
-	timerStateDisabled timerState = iota
+	timerStateDisabled
 	// The timer is enabled, but the clock timer may be set to an earlier
 	// expiration time due to a previous orphaned state.
 	timerStateEnabled
@@ -66,6 +67,9 @@ type timer struct {
 
 	// timer is the clock timer used to wait on.
 	timer tcpip.Timer
+
+	// callback is the function that's called when the timer expires.
+	callback func()
 }
 
 // init initializes the timer. Once it expires the function callback
@@ -73,11 +77,7 @@ type timer struct {
 func (t *timer) init(clock tcpip.Clock, f func()) {
 	t.state = timerStateDisabled
 	t.clock = clock
-
-	// Initialize a clock timer that will call the callback func, then
-	// immediately stop it.
-	t.timer = t.clock.AfterFunc(math.MaxInt64, f)
-	t.timer.Stop()
+	t.callback = f
 }
 
 // cleanup frees all resources associated with the timer.
@@ -90,15 +90,15 @@ func (t *timer) cleanup() {
 	*t = timer{}
 }
 
-// isZero returns true if the timer is in the zero state. This is usually
-// only true if init() has never been called or if cleanup has been called.
-func (t *timer) isZero() bool {
-	return *t == timer{}
+// isUninitialized returns true if the timer is in the uninitialized state. This
+// is only true if init() has never been called or if cleanup has been called.
+func (t *timer) isUninitialized() bool {
+	return t.state == timerUninitialized
 }
 
 // checkExpiration checks if the given timer has actually expired, it should be
 // called whenever the callback function is called, and is used to check if it's
-// a supurious timer expiration (due to a previously orphaned timer) or a
+// a spurious timer expiration (due to a previously orphaned timer) or a
 // legitimate one.
 func (t *timer) checkExpiration() bool {
 	// Transition to fully disabled state if we're just consuming an
@@ -143,8 +143,18 @@ func (t *timer) enable(d time.Duration) {
 	// Check if we need to set the runtime timer.
 	if t.state == timerStateDisabled || t.target.Before(t.clockTarget) {
 		t.clockTarget = t.target
-		t.timer.Reset(d)
+		t.resetOrStart(d)
 	}
 
 	t.state = timerStateEnabled
 }
+
+// resetOrStart creates the timer if it doesn't already exist or resets it with
+// the given duration if it does.
+func (t *timer) resetOrStart(d time.Duration) {
+	if t.timer == nil {
+		t.timer = t.clock.AfterFunc(d, t.callback)
+	} else {
+		t.timer.Reset(d)
+	}
+}
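
The timer rework replaces the old trick of allocating a clock timer with AfterFunc(math.MaxInt64) and immediately stopping it: the callback is now stored at init time and the underlying timer is created lazily on first use. A self-contained sketch of the same lazy pattern, shown with the standard library's time package rather than tcpip.Clock:

package main

import (
	"fmt"
	"time"
)

// lazyTimer creates its underlying timer only on first use, mirroring the
// resetOrStart pattern introduced in timer.go.
type lazyTimer struct {
	timer    *time.Timer
	callback func()
}

func (t *lazyTimer) init(f func()) { t.callback = f }

func (t *lazyTimer) resetOrStart(d time.Duration) {
	if t.timer == nil {
		t.timer = time.AfterFunc(d, t.callback)
	} else {
		t.timer.Reset(d)
	}
}

func main() {
	var t lazyTimer
	t.init(func() { fmt.Println("expired") })
	t.resetOrStart(10 * time.Millisecond) // allocates the timer on first use
	time.Sleep(50 * time.Millisecond)
	t.resetOrStart(10 * time.Millisecond) // later calls only Reset it
	time.Sleep(50 * time.Millisecond)
}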
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack/tcp_conntrack.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack/tcp_conntrack.go
index 012d3f60..4d74a6dd 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack/tcp_conntrack.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack/tcp_conntrack.go
@@ -55,14 +55,17 @@ const maxWindowShift = 14
 
 // TCB is a TCP Control Block. It holds state necessary to keep track of a TCP
 // connection and inform the caller when the connection has been closed.
+//
+// +stateify savable
 type TCB struct {
 	reply    stream
 	original stream
 
 	// State handlers. hdr is not guaranteed to contain bytes beyond the TCP
 	// header itself, i.e. it may not contain the payload.
-	handlerReply    func(tcb *TCB, hdr header.TCP, dataLen int) Result
-	handlerOriginal func(tcb *TCB, hdr header.TCP, dataLen int) Result
+	// TODO(b/341946753): Restore them when netstack is savable.
+	handlerReply    func(tcb *TCB, hdr header.TCP, dataLen int) Result `state:"nosave"`
+	handlerOriginal func(tcb *TCB, hdr header.TCP, dataLen int) Result `state:"nosave"`
 
 	// firstFin holds a pointer to the first stream to send a FIN.
 	firstFin *stream
@@ -321,6 +324,8 @@ func allOtherOriginal(t *TCB, tcp header.TCP, dataLen int) Result {
 }
 
 // streams holds the state of a TCP unidirectional stream.
+//
+// +stateify savable
 type stream struct {
 	// The interval [una, end) is the allowed interval as defined by the
 	// receiver, i.e., anything less than una has already been acknowledged
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack/tcpconntrack_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack/tcpconntrack_state_autogen.go
index ff53204d..fbf84e64 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack/tcpconntrack_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack/tcpconntrack_state_autogen.go
@@ -1,3 +1,91 @@
 // automatically generated by stateify.
 
 package tcpconntrack
+
+import (
+	"context"
+
+	"gvisor.dev/gvisor/pkg/state"
+)
+
+func (t *TCB) StateTypeName() string {
+	return "pkg/tcpip/transport/tcpconntrack.TCB"
+}
+
+func (t *TCB) StateFields() []string {
+	return []string{
+		"reply",
+		"original",
+		"firstFin",
+		"state",
+	}
+}
+
+func (t *TCB) beforeSave() {}
+
+// +checklocksignore
+func (t *TCB) StateSave(stateSinkObject state.Sink) {
+	t.beforeSave()
+	stateSinkObject.Save(0, &t.reply)
+	stateSinkObject.Save(1, &t.original)
+	stateSinkObject.Save(2, &t.firstFin)
+	stateSinkObject.Save(3, &t.state)
+}
+
+func (t *TCB) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (t *TCB) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &t.reply)
+	stateSourceObject.Load(1, &t.original)
+	stateSourceObject.Load(2, &t.firstFin)
+	stateSourceObject.Load(3, &t.state)
+}
+
+func (s *stream) StateTypeName() string {
+	return "pkg/tcpip/transport/tcpconntrack.stream"
+}
+
+func (s *stream) StateFields() []string {
+	return []string{
+		"una",
+		"nxt",
+		"end",
+		"finSeen",
+		"fin",
+		"rstSeen",
+		"shiftCnt",
+	}
+}
+
+func (s *stream) beforeSave() {}
+
+// +checklocksignore
+func (s *stream) StateSave(stateSinkObject state.Sink) {
+	s.beforeSave()
+	stateSinkObject.Save(0, &s.una)
+	stateSinkObject.Save(1, &s.nxt)
+	stateSinkObject.Save(2, &s.end)
+	stateSinkObject.Save(3, &s.finSeen)
+	stateSinkObject.Save(4, &s.fin)
+	stateSinkObject.Save(5, &s.rstSeen)
+	stateSinkObject.Save(6, &s.shiftCnt)
+}
+
+func (s *stream) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (s *stream) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &s.una)
+	stateSourceObject.Load(1, &s.nxt)
+	stateSourceObject.Load(2, &s.end)
+	stateSourceObject.Load(3, &s.finSeen)
+	stateSourceObject.Load(4, &s.fin)
+	stateSourceObject.Load(5, &s.rstSeen)
+	stateSourceObject.Load(6, &s.shiftCnt)
+}
+
+func init() {
+	state.Register((*TCB)(nil))
+	state.Register((*stream)(nil))
+}
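
TCB gains +stateify savable, but its handler fields are function values, which cannot be serialized; hence the state:"nosave" tags and the TODO about restoring them once netstack is savable. A tiny illustrative type (hypothetical, not part of the patch) showing the same tag:

package example

// tracker mimics TCB's approach: plain data fields are saved, while the
// function-valued field is skipped by stateify and must be re-installed
// after restore.
//
// +stateify savable
type tracker struct {
	count uint64

	// onEvent cannot be serialized.
	onEvent func() `state:"nosave"`
}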
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go
index eab6c95f..f8e30579 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go
@@ -21,7 +21,6 @@ import (
 	"math"
 	"time"
 
-	"gvisor.dev/gvisor/pkg/buffer"
 	"gvisor.dev/gvisor/pkg/sync"
 	"gvisor.dev/gvisor/pkg/tcpip"
 	"gvisor.dev/gvisor/pkg/tcpip/checksum"
@@ -40,7 +39,7 @@ type udpPacket struct {
 	senderAddress      tcpip.FullAddress
 	destinationAddress tcpip.FullAddress
 	packetInfo         tcpip.IPPacketInfo
-	pkt                stack.PacketBufferPtr
+	pkt                *stack.PacketBuffer
 	receivedAt         time.Time `state:".(int64)"`
 	// tosOrTClass stores either the Type of Service for IPv4 or the Traffic Class
 	// for IPv6.
@@ -64,7 +63,6 @@ type endpoint struct {
 	// change throughout the lifetime of the endpoint.
 	stack       *stack.Stack `state:"manual"`
 	waiterQueue *waiter.Queue
-	uniqueID    uint64
 	net         network.Endpoint
 	stats       tcpip.TransportEndpointStats
 	ops         tcpip.SocketOptions
@@ -111,7 +109,6 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQue
 	e := &endpoint{
 		stack:       s,
 		waiterQueue: waiterQueue,
-		uniqueID:    s.UniqueID(),
 	}
 	e.ops.InitHandler(e, e.stack, tcpip.GetStackSendBufferLimits, tcpip.GetStackReceiveBufferLimits)
 	e.ops.SetMulticastLoop(true)
@@ -138,11 +135,6 @@ func (e *endpoint) WakeupWriters() {
 	e.net.MaybeSignalWritable()
 }
 
-// UniqueID implements stack.TransportEndpoint.
-func (e *endpoint) UniqueID() uint64 {
-	return e.uniqueID
-}
-
 func (e *endpoint) LastError() tcpip.Error {
 	e.lastErrorMu.Lock()
 	defer e.lastErrorMu.Unlock()
@@ -436,16 +428,8 @@ func (e *endpoint) prepareForWrite(p tcpip.Payloader, opts tcpip.WriteOptions) (
 		return udpPacketInfo{}, &tcpip.ErrMessageTooLong{}
 	}
 
-	var buf buffer.Buffer
-	if _, err := buf.WriteFromReader(p, int64(p.Len())); err != nil {
-		buf.Release()
-		ctx.Release()
-		return udpPacketInfo{}, &tcpip.ErrBadBuffer{}
-	}
-
 	return udpPacketInfo{
 		ctx:        ctx,
-		data:       buf,
 		localPort:  e.localPort,
 		remotePort: dst.Port,
 	}, nil
@@ -473,10 +457,10 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp
 	}
 	defer udpInfo.ctx.Release()
 
-	dataSz := udpInfo.data.Size()
+	dataSz := p.Len()
 	pktInfo := udpInfo.ctx.PacketInfo()
-	pkt := udpInfo.ctx.TryNewPacketBuffer(header.UDPMinimumSize+int(pktInfo.MaxHeaderLength), udpInfo.data)
-	if pkt.IsNil() {
+	pkt := udpInfo.ctx.TryNewPacketBufferFromPayloader(header.UDPMinimumSize+int(pktInfo.MaxHeaderLength), p)
+	if pkt == nil {
 		return 0, &tcpip.ErrWouldBlock{}
 	}
 	defer pkt.DecRef()
@@ -593,7 +577,6 @@ func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error {
 // udpPacketInfo holds information needed to send a UDP packet.
 type udpPacketInfo struct {
 	ctx        network.WriteContext
-	data       buffer.Buffer
 	localPort  uint16
 	remotePort uint16
 }
@@ -679,11 +662,6 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) tcpip.Error {
 
 		oldPortFlags := e.boundPortFlags
 
-		nextID, btd, err := e.registerWithStack(netProtos, nextID)
-		if err != nil {
-			return err
-		}
-
 		// Remove the old registration.
 		if e.localPort != 0 {
 			previousID.LocalPort = e.localPort
@@ -691,6 +669,11 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) tcpip.Error {
 			e.stack.UnregisterTransportEndpoint(e.effectiveNetProtos, ProtocolNumber, previousID, e, oldPortFlags, e.boundBindToDevice)
 		}
 
+		nextID, btd, err := e.registerWithStack(netProtos, nextID)
+		if err != nil {
+			return err
+		}
+
 		e.localPort = nextID.LocalPort
 		e.remotePort = nextID.RemotePort
 		e.boundBindToDevice = btd
@@ -773,7 +756,7 @@ func (e *endpoint) registerWithStack(netProtos []tcpip.NetworkProtocolNumber, id
 			BindToDevice: bindToDevice,
 			Dest:         tcpip.FullAddress{},
 		}
-		port, err := e.stack.ReservePort(e.stack.Rand(), portRes, nil /* testPort */)
+		port, err := e.stack.ReservePort(e.stack.SecureRNG(), portRes, nil /* testPort */)
 		if err != nil {
 			return id, bindToDevice, err
 		}
@@ -908,7 +891,7 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {
 
 // HandlePacket is called by the stack when new packets arrive to this transport
 // endpoint.
-func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) {
+func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) {
 	// Get the header then trim it from the view.
 	hdr := header.UDP(pkt.TransportHeader().Slice())
 	netHdr := pkt.Network()
@@ -1000,7 +983,7 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketBu
 	}
 }
 
-func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, pkt stack.PacketBufferPtr) {
+func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, pkt *stack.PacketBuffer) {
 	// Update last error first.
 	e.lastErrorMu.Lock()
 	e.lastError = err
@@ -1050,7 +1033,7 @@ func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, p
 }
 
 // HandleError implements stack.TransportEndpoint.
-func (e *endpoint) HandleError(transErr stack.TransportError, pkt stack.PacketBufferPtr) {
+func (e *endpoint) HandleError(transErr stack.TransportError, pkt *stack.PacketBuffer) {
 	// TODO(gvisor.dev/issues/5270): Handle all transport errors.
 	switch transErr.Kind() {
 	case stack.DestinationPortUnreachableTransportError:
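
Two behavioural changes in endpoint.go are easy to miss in the churn: Connect now unregisters the old transport endpoint before reserving and registering the new ID, and write no longer copies the payload into a buffer.Buffer up front. Instead the Payloader is handed to the write context, which may decline. The caller pattern, condensed from the hunk above with explanatory comments (not a standalone program):

// Size the packet for the UDP header plus whatever headers the route needs,
// and let the write context pull the payload directly from the Payloader.
pkt := udpInfo.ctx.TryNewPacketBufferFromPayloader(header.UDPMinimumSize+int(pktInfo.MaxHeaderLength), p)
if pkt == nil {
	// No buffer space right now: surface would-block instead of having
	// copied the payload eagerly as the old code did.
	return 0, &tcpip.ErrWouldBlock{}
}
// The endpoint holds its reference only for the duration of the send.
defer pkt.DecRef()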
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint_state.go
index 546840b6..488e4660 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint_state.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint_state.go
@@ -15,6 +15,7 @@
 package udp
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -29,22 +30,23 @@ func (p *udpPacket) saveReceivedAt() int64 {
 }
 
 // loadReceivedAt is invoked by stateify.
-func (p *udpPacket) loadReceivedAt(nsec int64) {
+func (p *udpPacket) loadReceivedAt(_ context.Context, nsec int64) {
 	p.receivedAt = time.Unix(0, nsec)
 }
 
 // afterLoad is invoked by stateify.
-func (e *endpoint) afterLoad() {
-	stack.StackFromEnv.RegisterRestoredEndpoint(e)
+func (e *endpoint) afterLoad(ctx context.Context) {
+	stack.RestoreStackFromContext(ctx).RegisterRestoredEndpoint(e)
 }
 
 // beforeSave is invoked by stateify.
 func (e *endpoint) beforeSave() {
 	e.freeze()
+	e.stack.RegisterResumableEndpoint(e)
 }
 
-// Resume implements tcpip.ResumableEndpoint.Resume.
-func (e *endpoint) Resume(s *stack.Stack) {
+// Restore implements tcpip.RestoredEndpoint.Restore.
+func (e *endpoint) Restore(s *stack.Stack) {
 	e.thaw()
 
 	e.mu.Lock()
@@ -75,3 +77,8 @@ func (e *endpoint) Resume(s *stack.Stack) {
 		panic(fmt.Sprintf("unhandled state = %s", state))
 	}
 }
+
+// Resume implements tcpip.ResumableEndpoint.Resume.
+func (e *endpoint) Resume() {
+	e.thaw()
+}
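
endpoint_state.go also shows the updated shape of custom save/load hooks: udpPacket.receivedAt is declared with a state:".(int64)" conversion tag, and the generated loader (see udp_state_autogen.go below) now forwards the restore context into loadReceivedAt. The same pattern sketched on a hypothetical standalone type:

package example

import (
	"context"
	"time"
)

// record uses the `state:".(int64)"` conversion tag: stateify saves the
// field via saveStamp and restores it via loadStamp.
type record struct {
	stamp time.Time `state:".(int64)"`
}

// saveStamp is invoked by stateify when the object is saved.
func (r *record) saveStamp() int64 { return r.stamp.UnixNano() }

// loadStamp is invoked by stateify on restore; it now also receives the
// restore context, per the 20240729.0 API.
func (r *record) loadStamp(_ context.Context, nsec int64) {
	r.stamp = time.Unix(0, nsec)
}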
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go
index 711a5ed3..7950abe5 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go
@@ -43,7 +43,7 @@ func NewForwarder(s *stack.Stack, handler func(*ForwarderRequest)) *Forwarder {
 //
 // This function is expected to be passed as an argument to the
 // stack.SetTransportProtocolHandler function.
-func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) bool {
+func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
 	f.handler(&ForwarderRequest{
 		stack: f.stack,
 		id:    id,
@@ -59,7 +59,7 @@ func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketB
 type ForwarderRequest struct {
 	stack *stack.Stack
 	id    stack.TransportEndpointID
-	pkt   stack.PacketBufferPtr
+	pkt   *stack.PacketBuffer
 }
 
 // ID returns the 4-tuple (src address, src port, dst address, dst port) that
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/protocol.go
index d4de0d2b..49870ab8 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/protocol.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/protocol.go
@@ -43,6 +43,7 @@ const (
 	MaxBufferSize = 4 << 20 // 4MiB
 )
 
+// +stateify savable
 type protocol struct {
 	stack *stack.Stack
 }
@@ -77,7 +78,7 @@ func (*protocol) ParsePorts(v []byte) (src, dst uint16, err tcpip.Error) {
 
 // HandleUnknownDestinationPacket handles packets that are targeted at this
 // protocol but don't match any existing endpoint.
-func (p *protocol) HandleUnknownDestinationPacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) stack.UnknownDestinationPacketDisposition {
+func (p *protocol) HandleUnknownDestinationPacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) stack.UnknownDestinationPacketDisposition {
 	hdr := header.UDP(pkt.TransportHeader().Slice())
 	netHdr := pkt.Network()
 	lengthValid, csumValid := header.UDPValid(
@@ -124,7 +125,7 @@ func (*protocol) Pause() {}
 func (*protocol) Resume() {}
 
 // Parse implements stack.TransportProtocol.Parse.
-func (*protocol) Parse(pkt stack.PacketBufferPtr) bool {
+func (*protocol) Parse(pkt *stack.PacketBuffer) bool {
 	return parse.UDP(pkt)
 }
 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/udp_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/udp_state_autogen.go
index c624ba41..e10d21cd 100644
--- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/udp_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/udp_state_autogen.go
@@ -3,6 +3,8 @@
 package udp
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -42,10 +44,10 @@ func (p *udpPacket) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(8, &p.ttlOrHopLimit)
 }
 
-func (p *udpPacket) afterLoad() {}
+func (p *udpPacket) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (p *udpPacket) StateLoad(stateSourceObject state.Source) {
+func (p *udpPacket) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &p.udpPacketEntry)
 	stateSourceObject.Load(1, &p.netProto)
 	stateSourceObject.Load(2, &p.senderAddress)
@@ -54,7 +56,7 @@ func (p *udpPacket) StateLoad(stateSourceObject state.Source) {
 	stateSourceObject.Load(5, &p.pkt)
 	stateSourceObject.Load(7, &p.tosOrTClass)
 	stateSourceObject.Load(8, &p.ttlOrHopLimit)
-	stateSourceObject.LoadValue(6, new(int64), func(y any) { p.loadReceivedAt(y.(int64)) })
+	stateSourceObject.LoadValue(6, new(int64), func(y any) { p.loadReceivedAt(ctx, y.(int64)) })
 }
 
 func (e *endpoint) StateTypeName() string {
@@ -65,7 +67,6 @@ func (e *endpoint) StateFields() []string {
 	return []string{
 		"DefaultSocketOptionsHandler",
 		"waiterQueue",
-		"uniqueID",
 		"net",
 		"stats",
 		"ops",
@@ -90,47 +91,70 @@ func (e *endpoint) StateSave(stateSinkObject state.Sink) {
 	e.beforeSave()
 	stateSinkObject.Save(0, &e.DefaultSocketOptionsHandler)
 	stateSinkObject.Save(1, &e.waiterQueue)
-	stateSinkObject.Save(2, &e.uniqueID)
-	stateSinkObject.Save(3, &e.net)
-	stateSinkObject.Save(4, &e.stats)
-	stateSinkObject.Save(5, &e.ops)
-	stateSinkObject.Save(6, &e.rcvReady)
-	stateSinkObject.Save(7, &e.rcvList)
-	stateSinkObject.Save(8, &e.rcvBufSize)
-	stateSinkObject.Save(9, &e.rcvClosed)
-	stateSinkObject.Save(10, &e.lastError)
-	stateSinkObject.Save(11, &e.portFlags)
-	stateSinkObject.Save(12, &e.boundBindToDevice)
-	stateSinkObject.Save(13, &e.boundPortFlags)
-	stateSinkObject.Save(14, &e.readShutdown)
-	stateSinkObject.Save(15, &e.effectiveNetProtos)
-	stateSinkObject.Save(16, &e.frozen)
-	stateSinkObject.Save(17, &e.localPort)
-	stateSinkObject.Save(18, &e.remotePort)
+	stateSinkObject.Save(2, &e.net)
+	stateSinkObject.Save(3, &e.stats)
+	stateSinkObject.Save(4, &e.ops)
+	stateSinkObject.Save(5, &e.rcvReady)
+	stateSinkObject.Save(6, &e.rcvList)
+	stateSinkObject.Save(7, &e.rcvBufSize)
+	stateSinkObject.Save(8, &e.rcvClosed)
+	stateSinkObject.Save(9, &e.lastError)
+	stateSinkObject.Save(10, &e.portFlags)
+	stateSinkObject.Save(11, &e.boundBindToDevice)
+	stateSinkObject.Save(12, &e.boundPortFlags)
+	stateSinkObject.Save(13, &e.readShutdown)
+	stateSinkObject.Save(14, &e.effectiveNetProtos)
+	stateSinkObject.Save(15, &e.frozen)
+	stateSinkObject.Save(16, &e.localPort)
+	stateSinkObject.Save(17, &e.remotePort)
 }
 
 // +checklocksignore
-func (e *endpoint) StateLoad(stateSourceObject state.Source) {
+func (e *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.DefaultSocketOptionsHandler)
 	stateSourceObject.Load(1, &e.waiterQueue)
-	stateSourceObject.Load(2, &e.uniqueID)
-	stateSourceObject.Load(3, &e.net)
-	stateSourceObject.Load(4, &e.stats)
-	stateSourceObject.Load(5, &e.ops)
-	stateSourceObject.Load(6, &e.rcvReady)
-	stateSourceObject.Load(7, &e.rcvList)
-	stateSourceObject.Load(8, &e.rcvBufSize)
-	stateSourceObject.Load(9, &e.rcvClosed)
-	stateSourceObject.Load(10, &e.lastError)
-	stateSourceObject.Load(11, &e.portFlags)
-	stateSourceObject.Load(12, &e.boundBindToDevice)
-	stateSourceObject.Load(13, &e.boundPortFlags)
-	stateSourceObject.Load(14, &e.readShutdown)
-	stateSourceObject.Load(15, &e.effectiveNetProtos)
-	stateSourceObject.Load(16, &e.frozen)
-	stateSourceObject.Load(17, &e.localPort)
-	stateSourceObject.Load(18, &e.remotePort)
-	stateSourceObject.AfterLoad(e.afterLoad)
+	stateSourceObject.Load(2, &e.net)
+	stateSourceObject.Load(3, &e.stats)
+	stateSourceObject.Load(4, &e.ops)
+	stateSourceObject.Load(5, &e.rcvReady)
+	stateSourceObject.Load(6, &e.rcvList)
+	stateSourceObject.Load(7, &e.rcvBufSize)
+	stateSourceObject.Load(8, &e.rcvClosed)
+	stateSourceObject.Load(9, &e.lastError)
+	stateSourceObject.Load(10, &e.portFlags)
+	stateSourceObject.Load(11, &e.boundBindToDevice)
+	stateSourceObject.Load(12, &e.boundPortFlags)
+	stateSourceObject.Load(13, &e.readShutdown)
+	stateSourceObject.Load(14, &e.effectiveNetProtos)
+	stateSourceObject.Load(15, &e.frozen)
+	stateSourceObject.Load(16, &e.localPort)
+	stateSourceObject.Load(17, &e.remotePort)
+	stateSourceObject.AfterLoad(func() { e.afterLoad(ctx) })
+}
+
+func (p *protocol) StateTypeName() string {
+	return "pkg/tcpip/transport/udp.protocol"
+}
+
+func (p *protocol) StateFields() []string {
+	return []string{
+		"stack",
+	}
+}
+
+func (p *protocol) beforeSave() {}
+
+// +checklocksignore
+func (p *protocol) StateSave(stateSinkObject state.Sink) {
+	p.beforeSave()
+	stateSinkObject.Save(0, &p.stack)
+}
+
+func (p *protocol) afterLoad(context.Context) {}
+
+// +checklocksignore
+func (p *protocol) StateLoad(ctx context.Context, stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &p.stack)
 }
 
 func (l *udpPacketList) StateTypeName() string {
@@ -153,10 +177,10 @@ func (l *udpPacketList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *udpPacketList) afterLoad() {}
+func (l *udpPacketList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *udpPacketList) StateLoad(stateSourceObject state.Source) {
+func (l *udpPacketList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
@@ -181,10 +205,10 @@ func (e *udpPacketEntry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *udpPacketEntry) afterLoad() {}
+func (e *udpPacketEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *udpPacketEntry) StateLoad(stateSourceObject state.Source) {
+func (e *udpPacketEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
@@ -192,6 +216,7 @@ func (e *udpPacketEntry) StateLoad(stateSourceObject state.Source) {
 func init() {
 	state.Register((*udpPacket)(nil))
 	state.Register((*endpoint)(nil))
+	state.Register((*protocol)(nil))
 	state.Register((*udpPacketList)(nil))
 	state.Register((*udpPacketEntry)(nil))
 }
diff --git a/vendor/gvisor.dev/gvisor/pkg/waiter/waiter.go b/vendor/gvisor.dev/gvisor/pkg/waiter/waiter.go
index 89a332c1..1b47ae1b 100644
--- a/vendor/gvisor.dev/gvisor/pkg/waiter/waiter.go
+++ b/vendor/gvisor.dev/gvisor/pkg/waiter/waiter.go
@@ -77,7 +77,7 @@ const (
 	EventInternal EventMask = 0x1000
 	EventRdHUp    EventMask = 0x2000 // POLLRDHUP
 
-	allEvents      EventMask = 0x1f | EventRdNorm | EventWrNorm | EventRdHUp
+	AllEvents      EventMask = 0x1f | EventRdNorm | EventWrNorm | EventRdHUp
 	ReadableEvents EventMask = EventIn | EventRdNorm
 	WritableEvents EventMask = EventOut | EventWrNorm
 )
@@ -86,7 +86,7 @@ const (
 // from the Linux events e, which is in the format used by poll(2).
 func EventMaskFromLinux(e uint32) EventMask {
 	// Our flag definitions are currently identical to Linux.
-	return EventMask(e) & allEvents
+	return EventMask(e) & AllEvents
 }
 
 // ToLinux returns e in the format used by Linux poll(2).
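
Exporting allEvents as AllEvents makes the full event mask usable outside the waiter package, while EventMaskFromLinux keeps masking raw poll(2) bits with it. A small usage sketch, assuming POLLIN's usual value of 0x1:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/waiter"
)

func main() {
	// EventMaskFromLinux masks the raw poll(2) bits with the now exported
	// AllEvents before returning them.
	mask := waiter.EventMaskFromLinux(0x1)
	fmt.Println(mask&waiter.ReadableEvents != 0) // true: POLLIN is readable
	fmt.Println(mask == mask&waiter.AllEvents)   // true: already within AllEvents
}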
diff --git a/vendor/gvisor.dev/gvisor/pkg/waiter/waiter_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/waiter/waiter_state_autogen.go
index e785fe3b..91d35041 100644
--- a/vendor/gvisor.dev/gvisor/pkg/waiter/waiter_state_autogen.go
+++ b/vendor/gvisor.dev/gvisor/pkg/waiter/waiter_state_autogen.go
@@ -3,6 +3,8 @@
 package waiter
 
 import (
+	"context"
+
 	"gvisor.dev/gvisor/pkg/state"
 )
 
@@ -28,10 +30,10 @@ func (e *Entry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(2, &e.mask)
 }
 
-func (e *Entry) afterLoad() {}
+func (e *Entry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *Entry) StateLoad(stateSourceObject state.Source) {
+func (e *Entry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.waiterEntry)
 	stateSourceObject.Load(1, &e.eventListener)
 	stateSourceObject.Load(2, &e.mask)
@@ -55,10 +57,10 @@ func (q *Queue) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(0, &q.list)
 }
 
-func (q *Queue) afterLoad() {}
+func (q *Queue) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (q *Queue) StateLoad(stateSourceObject state.Source) {
+func (q *Queue) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &q.list)
 }
 
@@ -82,10 +84,10 @@ func (l *waiterList) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &l.tail)
 }
 
-func (l *waiterList) afterLoad() {}
+func (l *waiterList) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (l *waiterList) StateLoad(stateSourceObject state.Source) {
+func (l *waiterList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &l.head)
 	stateSourceObject.Load(1, &l.tail)
 }
@@ -110,10 +112,10 @@ func (e *waiterEntry) StateSave(stateSinkObject state.Sink) {
 	stateSinkObject.Save(1, &e.prev)
 }
 
-func (e *waiterEntry) afterLoad() {}
+func (e *waiterEntry) afterLoad(context.Context) {}
 
 // +checklocksignore
-func (e *waiterEntry) StateLoad(stateSourceObject state.Source) {
+func (e *waiterEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
 	stateSourceObject.Load(0, &e.next)
 	stateSourceObject.Load(1, &e.prev)
 }
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1800eb97..a9b02f38 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -195,8 +195,8 @@ golang.org/x/text/internal/utf8internal
 golang.org/x/text/language
 golang.org/x/text/runes
 golang.org/x/text/transform
-# golang.org/x/time v0.3.0
-## explicit
+# golang.org/x/time v0.5.0
+## explicit; go 1.18
 golang.org/x/time/rate
 # golang.org/x/tools v0.22.0
 ## explicit; go 1.19
@@ -223,8 +223,8 @@ gopkg.in/tomb.v1
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# gvisor.dev/gvisor v0.0.0-20231023213702-2691a8f9b1cf
-## explicit; go 1.20
+# gvisor.dev/gvisor v0.0.0-20240726212243-a2b0498dbe7d
+## explicit; go 1.22.0
 gvisor.dev/gvisor/pkg/atomicbitops
 gvisor.dev/gvisor/pkg/bits
 gvisor.dev/gvisor/pkg/buffer